/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/

/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly in to sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */
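/*
 * Note: this file is not built standalone; it is #included by a
 * board-specific wrapper (e.g. lasi_82596.c or sni_82596.c), which must
 * first define the glue this code relies on: SWAP16()/SWAP32() for the
 * halfword swap described above, the DMA_WBACK/DMA_INV/DMA_WBACK_INV
 * cache helpers, SYSBUS, DMA_ALLOC/DMA_FREE, and the mpu_port()/ca()
 * accessors.  As an illustrative sketch only (the wrapper's definitions
 * are authoritative), the swap helpers for a big-endian host reduce to:
 *
 *      #define SWAP32(x)       (((u32)(x) << 16) | (((u32)(x)) >> 16))
 *      #define SWAP16(x)       (x)
 *
 * i.e. a 32-bit value has its 16-bit halves exchanged, matching the
 * 0x12345678 -> 0x56781234 behaviour described above, while 16-bit
 * values are already laid out as the chip expects.
 */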

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>

/* DEBUG flags
 */

#define DEB_INIT        0x0001
#define DEB_PROBE       0x0002
#define DEB_SERIOUS     0x0004
#define DEB_ERRORS      0x0008
#define DEB_MULTI       0x0010
#define DEB_TDR         0x0020
#define DEB_OPEN        0x0040
#define DEB_RESET       0x0080
#define DEB_ADDCMD      0x0100
#define DEB_STATUS      0x0200
#define DEB_STARTTX     0x0400
#define DEB_RXADDR      0x0800
#define DEB_TXADDR      0x1000
#define DEB_RXFRAME     0x2000
#define DEB_INTS        0x4000
#define DEB_STRUCT      0x8000
#define DEB_ANY         0xffff


#define DEB(x, y)       do { if (i596_debug & (x)) { y; } } while (0)


/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first
 * (an illustrative sketch follows struct i596_reg below).
 * This only applies to VME boards.
 */
#define PORT_RESET              0x00    /* reset 82596 */
#define PORT_SELFTEST           0x01    /* selftest */
#define PORT_ALTSCP             0x02    /* alternate SCB address */
#define PORT_ALTDUMP            0x03    /* Alternate DUMP address */

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ      1536
#define MAX_MC_CNT      64

#define ISCP_BUSY       0x0001

#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL         0x8000  /* The last command of the list, stop. */
#define CMD_SUSP        0x4000  /* Suspend after doing cmd. */
#define CMD_INTR        0x2000  /* Interrupt after doing cmd. */

#define CMD_FLEX        0x0008  /* Enable flexible memory model */

enum commands {
        CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
        CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C          0x8000  /* Set to 0 after execution */
#define STAT_B          0x4000  /* Command being executed */
#define STAT_OK         0x2000  /* Command executed ok */
#define STAT_A          0x1000  /* Command aborted */

#define  CUC_START      0x0100
#define  CUC_RESUME     0x0200
#define  CUC_SUSPEND    0x0300
#define  CUC_ABORT      0x0400
#define  RX_START       0x0010
#define  RX_RESUME      0x0020
#define  RX_SUSPEND     0x0030
#define  RX_ABORT       0x0040

#define TX_TIMEOUT      5


struct i596_reg {
        unsigned short porthi;
        unsigned short portlo;
        u32            ca;
};
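
/*
 * Illustrative sketch only: the real mpu_port() is supplied by the board
 * wrapper and differs per platform (register access, delays, optional
 * extra word swapping).  Per the PORT rule above, the 32-bit command is
 * split into two 16-bit writes, most significant word first.  The
 * example_mpu_port() name and the plain writew()/udelay() timing here
 * are assumptions for illustration.
 */
#if 0   /* example only, not built */
static void example_mpu_port(struct i596_reg __iomem *regs,
                             int cmd, dma_addr_t addr)
{
        u32 v = (u32)cmd | (u32)addr;   /* low bits select the PORT_* command */

        writew(v >> 16, &regs->porthi); /* most significant word first */
        udelay(1);
        writew(v & 0xffff, &regs->portlo);
}
#endif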

#define EOF             0x8000
#define SIZE_MASK       0x3fff

struct i596_tbd {
        unsigned short size;
        unsigned short pad;
        dma_addr_t     next;
        dma_addr_t     data;
        u32 cache_pad[5];               /* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596,
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type (see the linking sketch below).
 */

struct i596_cmd {
        struct i596_cmd *v_next;        /* Address from CPU's viewpoint */
        unsigned short status;
        unsigned short command;
        dma_addr_t     b_next;  /* Address from i596 viewpoint */
};
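
/*
 * For illustration, appending a command behind the current tail keeps
 * both views in step (this mirrors what i596_add_cmd() below does):
 *
 *      tail->v_next = cmd;                                    (CPU chain)
 *      tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));  (chip chain)
 *
 * so the chip's pointer lands on the status field, as noted above.
 */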

struct tx_cmd {
        struct i596_cmd cmd;
        dma_addr_t     tbd;
        unsigned short size;
        unsigned short pad;
        struct sk_buff *skb;            /* So we can free it after tx */
        dma_addr_t dma_addr;
#ifdef __LP64__
        u32 cache_pad[6];               /* Total 64 bytes... */
#else
        u32 cache_pad[1];               /* Total 32 bytes... */
#endif
};

struct tdr_cmd {
        struct i596_cmd cmd;
        unsigned short status;
        unsigned short pad;
};

struct mc_cmd {
        struct i596_cmd cmd;
        short mc_cnt;
        char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
        struct i596_cmd cmd;
        char eth_addr[8];
};

struct cf_cmd {
        struct i596_cmd cmd;
        char i596_config[16];
};

struct i596_rfd {
        unsigned short stat;
        unsigned short cmd;
        dma_addr_t     b_next;  /* Address from i596 viewpoint */
        dma_addr_t     rbd;
        unsigned short count;
        unsigned short size;
        struct i596_rfd *v_next;        /* Address from CPU's viewpoint */
        struct i596_rfd *v_prev;
#ifndef __LP64__
        u32 cache_pad[2];               /* Total 32 bytes... */
#endif
};

struct i596_rbd {
    /* hardware data */
    unsigned short count;
    unsigned short zero1;
    dma_addr_t     b_next;
    dma_addr_t     b_data;              /* Address from i596 viewpoint */
    unsigned short size;
    unsigned short zero2;
    /* driver data */
    struct sk_buff *skb;
    struct i596_rbd *v_next;
    dma_addr_t     b_addr;              /* This rbd addr from i596 view */
    unsigned char *v_data;              /* Address from CPU's viewpoint */
                                        /* Total 32 bytes... */
#ifdef __LP64__
    u32 cache_pad[4];
#endif
};

/* These values are chosen so struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

struct i596_scb {
        unsigned short status;
        unsigned short command;
        dma_addr_t    cmd;
        dma_addr_t    rfd;
        u32           crc_err;
        u32           align_err;
        u32           resource_err;
        u32           over_err;
        u32           rcvdt_err;
        u32           short_err;
        unsigned short t_on;
        unsigned short t_off;
};

struct i596_iscp {
        u32           stat;
        dma_addr_t    scb;
};

struct i596_scp {
        u32           sysbus;
        u32           pad;
        dma_addr_t    iscp;
};

struct i596_dma {
        struct i596_scp scp                     __attribute__((aligned(32)));
        volatile struct i596_iscp iscp          __attribute__((aligned(32)));
        volatile struct i596_scb scb            __attribute__((aligned(32)));
        struct sa_cmd sa_cmd                    __attribute__((aligned(32)));
        struct cf_cmd cf_cmd                    __attribute__((aligned(32)));
        struct tdr_cmd tdr_cmd                  __attribute__((aligned(32)));
        struct mc_cmd mc_cmd                    __attribute__((aligned(32)));
        struct i596_rfd rfds[RX_RING_SIZE]      __attribute__((aligned(32)));
        struct i596_rbd rbds[RX_RING_SIZE]      __attribute__((aligned(32)));
        struct tx_cmd tx_cmds[TX_RING_SIZE]     __attribute__((aligned(32)));
        struct i596_tbd tbds[TX_RING_SIZE]      __attribute__((aligned(32)));
};

struct i596_private {
        struct i596_dma *dma;
        u32    stat;
        int last_restart;
        struct i596_rfd *rfd_head;
        struct i596_rbd *rbd_head;
        struct i596_cmd *cmd_tail;
        struct i596_cmd *cmd_head;
        int cmd_backlog;
        u32    last_cmd;
        struct net_device_stats stats;
        int next_tx_cmd;
        int options;
        spinlock_t lock;       /* serialize access to chip */
        dma_addr_t dma_addr;
        void __iomem *mpu_port;
        void __iomem *ca;
};

static const char init_setup[] =
{
        0x8E,           /* length, prefetch on */
        0xC8,           /* fifo to 8, monitor off */
        0x80,           /* don't save bad frames */
        0x2E,           /* No source address insertion, 8 byte preamble */
        0x00,           /* priority and backoff defaults */
        0x60,           /* interframe spacing */
        0x00,           /* slot time LSB */
        0xf2,           /* slot time and retries */
        0x00,           /* promiscuous mode */
        0x00,           /* collision detect */
        0x40,           /* minimum frame length */
        0xff,
        0x00,
        0x7f /* multi IA */ };
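
/*
 * The low nibble of the first byte (0x8E & 0x0f == 14) is the number of
 * configuration bytes, which is why init_i596_mem() below copies exactly
 * 14 bytes of this table into the CmdConfigure block.
 */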

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static struct net_device_stats *i596_get_stats(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif


static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
        DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
        while (--delcnt && dma->iscp.stat) {
                udelay(10);
                DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
        }
        if (!delcnt) {
                printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
                     dev->name, str, SWAP16(dma->iscp.stat));
                return -1;
        } else
                return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
        DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
        while (--delcnt && dma->scb.command) {
                udelay(10);
                DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
        }
        if (!delcnt) {
                printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
                       dev->name, str,
                       SWAP16(dma->scb.status),
                       SWAP16(dma->scb.command));
                return -1;
        } else
                return 0;
}


static void i596_display_data(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        struct i596_cmd *cmd;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
               &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
        printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
               &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
        printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
                " .cmd = %08x, .rfd = %08x\n",
               &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
                SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
        printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
               " over %x, rcvdt %x, short %x\n",
               SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
               SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
               SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
        cmd = lp->cmd_head;
        while (cmd != NULL) {
                printk(KERN_DEBUG
                       "cmd at %p, .status = %04x, .command = %04x,"
                       " .b_next = %08x\n",
                       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
                       SWAP32(cmd->b_next));
                cmd = cmd->v_next;
        }
        rfd = lp->rfd_head;
        printk(KERN_DEBUG "rfd_head = %p\n", rfd);
        do {
                printk(KERN_DEBUG
                       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
                       " count %04x\n",
                       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
                       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
                       SWAP16(rfd->count));
                rfd = rfd->v_next;
        } while (rfd != lp->rfd_head);
        rbd = lp->rbd_head;
        printk(KERN_DEBUG "rbd_head = %p\n", rbd);
        do {
                printk(KERN_DEBUG
                       "   %p .count %04x, b_next %08x, b_data %08x,"
                       " size %04x\n",
                        rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
                       SWAP32(rbd->b_data), SWAP16(rbd->size));
                rbd = rbd->v_next;
        } while (rbd != lp->rbd_head);
        DMA_INV(dev, dma, sizeof(struct i596_dma));
}


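/* Translate a CPU pointer within the i596_dma block into the bus address
 * the chip must be given: the offset inside the block plus the block's
 * DMA base address.
 */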
#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))

static inline int init_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int i;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        /* First build the Receive Buffer Descriptor List */

        for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
                dma_addr_t dma_addr;
                struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);

                if (skb == NULL)
                        return -1;
                skb_reserve(skb, 2);
                dma_addr = dma_map_single(dev->dev.parent, skb->data,
                                          PKT_BUF_SZ, DMA_FROM_DEVICE);
                rbd->v_next = rbd+1;
                rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
                rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
                rbd->skb = skb;
                rbd->v_data = skb->data;
                rbd->b_data = SWAP32(dma_addr);
                rbd->size = SWAP16(PKT_BUF_SZ);
        }
        lp->rbd_head = dma->rbds;
        rbd = dma->rbds + rx_ring_size - 1;
        rbd->v_next = dma->rbds;
        rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

        /* Now build the Receive Frame Descriptor List */

        for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
                rfd->rbd = I596_NULL;
                rfd->v_next = rfd+1;
                rfd->v_prev = rfd-1;
                rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
                rfd->cmd = SWAP16(CMD_FLEX);
        }
        lp->rfd_head = dma->rfds;
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        rfd = dma->rfds;
        rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
        rfd->v_prev = dma->rfds + rx_ring_size - 1;
        rfd = dma->rfds + rx_ring_size - 1;
        rfd->v_next = dma->rfds;
        rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
        rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
        return 0;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_rbd *rbd;
        int i;

        for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
                if (rbd->skb == NULL)
                        break;
                dma_unmap_single(dev->dev.parent,
                                 (dma_addr_t)SWAP32(rbd->b_data),
                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb(rbd->skb);
                rbd->skb = NULL;        /* don't free it again on the next pass */
        }
}


static void rebuild_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int i;

        /* Ensure rx frame/buffer descriptors are tidy */

        for (i = 0; i < rx_ring_size; i++) {
                dma->rfds[i].rbd = I596_NULL;
                dma->rfds[i].cmd = SWAP16(CMD_FLEX);
        }
        dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
        lp->rfd_head = dma->rfds;
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        lp->rbd_head = dma->rbds;
        dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}


static int init_i596_mem(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        unsigned long flags;

        mpu_port(dev, PORT_RESET, 0);
        udelay(100);                    /* Wait 100us - seems to help */

        /* change the scp address */

        lp->last_cmd = jiffies;

        dma->scp.sysbus = SYSBUS;
        dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
        dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
        dma->iscp.stat = SWAP32(ISCP_BUSY);
        lp->cmd_backlog = 0;

        lp->cmd_head = NULL;
        dma->scb.cmd = I596_NULL;

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

        DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
        DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
        ca(dev);
        if (wait_istat(dev, dma, 1000, "initialization timed out"))
                goto failed;
        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: i82596 initialization successful\n",
                             dev->name));

        if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
                printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
                goto failed;
        }

        /* Ensure rx frame/buffer descriptors are tidy */
        rebuild_rx_bufs(dev);

        dma->scb.command = 0;
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: queuing CmdConfigure\n", dev->name));
        memcpy(dma->cf_cmd.i596_config, init_setup, 14);
        dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
        DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
        i596_add_cmd(dev, &dma->cf_cmd.cmd);

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
        memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
        dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
        DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &dma->sa_cmd.cmd);

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
        dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
        DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
        i596_add_cmd(dev, &dma->tdr_cmd.cmd);

        spin_lock_irqsave (&lp->lock, flags);

        if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
                spin_unlock_irqrestore (&lp->lock, flags);
                goto failed_free_irq;
        }
        DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
        dma->scb.command = SWAP16(RX_START);
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        ca(dev);

        spin_unlock_irqrestore (&lp->lock, flags);
        if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
                goto failed_free_irq;
        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: Receive unit started OK\n", dev->name));
        return 0;

failed_free_irq:
        free_irq(dev->irq, dev);
failed:
        printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
        mpu_port(dev, PORT_RESET, 0);
        return -1;
}


static inline int i596_rx(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;
        int frames = 0;

        DEB(DEB_RXFRAME, printk(KERN_DEBUG
                                "i596_rx(), rfd_head %p, rbd_head %p\n",
                                lp->rfd_head, lp->rbd_head));


        rfd = lp->rfd_head;             /* Ref next frame to check */

        DMA_INV(dev, rfd, sizeof(struct i596_rfd));
        while (rfd->stat & SWAP16(STAT_C)) {    /* Loop while complete frames */
                if (rfd->rbd == I596_NULL)
                        rbd = NULL;
                else if (rfd->rbd == lp->rbd_head->b_addr) {
                        rbd = lp->rbd_head;
                        DMA_INV(dev, rbd, sizeof(struct i596_rbd));
                } else {
                        printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
                        /* XXX Now what? */
                        rbd = NULL;
                }
                DEB(DEB_RXFRAME, printk(KERN_DEBUG
                                      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
                                      rfd, rfd->rbd, rfd->stat));

                if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
                        /* a good frame */
                        int pkt_len = SWAP16(rbd->count) & 0x3fff;
                        struct sk_buff *skb = rbd->skb;
                        int rx_in_place = 0;

                        DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
                        frames++;

                        /* Check if the packet is long enough to just accept
                         * without copying to a properly sized skbuff.
                         */

                        if (pkt_len > rx_copybreak) {
                                struct sk_buff *newskb;
                                dma_addr_t dma_addr;

                                dma_unmap_single(dev->dev.parent,
                                                 (dma_addr_t)SWAP32(rbd->b_data),
                                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                                /* Get fresh skbuff to replace filled one. */
                                newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
                                if (newskb == NULL) {
                                        skb = NULL;     /* drop pkt */
                                        goto memory_squeeze;
                                }
                                skb_reserve(newskb, 2);

                                /* Pass up the skb already on the Rx ring. */
                                skb_put(skb, pkt_len);
                                rx_in_place = 1;
                                rbd->skb = newskb;
                                dma_addr = dma_map_single(dev->dev.parent,
                                                          newskb->data,
                                                          PKT_BUF_SZ,
                                                          DMA_FROM_DEVICE);
                                rbd->v_data = newskb->data;
                                rbd->b_data = SWAP32(dma_addr);
                                DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
                        } else
                                skb = netdev_alloc_skb(dev, pkt_len + 2);
memory_squeeze:
                        if (skb == NULL) {
                                /* XXX tulip.c can defer packets here!! */
                                printk(KERN_ERR
                                       "%s: i596_rx Memory squeeze, dropping packet.\n",
                                       dev->name);
                                lp->stats.rx_dropped++;
                        } else {
                                if (!rx_in_place) {
                                        /* 16 byte align the data fields */
                                        dma_sync_single_for_cpu(dev->dev.parent,
                                                                (dma_addr_t)SWAP32(rbd->b_data),
                                                                PKT_BUF_SZ, DMA_FROM_DEVICE);
                                        skb_reserve(skb, 2);
                                        memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
                                        dma_sync_single_for_device(dev->dev.parent,
                                                                   (dma_addr_t)SWAP32(rbd->b_data),
                                                                   PKT_BUF_SZ, DMA_FROM_DEVICE);
                                }
                                skb->len = pkt_len;
                                skb->protocol = eth_type_trans(skb, dev);
                                netif_rx(skb);
                                dev->last_rx = jiffies;
                                lp->stats.rx_packets++;
                                lp->stats.rx_bytes += pkt_len;
                        }
                } else {
                        DEB(DEB_ERRORS, printk(KERN_DEBUG
                                               "%s: Error, rfd.stat = 0x%04x\n",
                                               dev->name, rfd->stat));
                        lp->stats.rx_errors++;
                        if (rfd->stat & SWAP16(0x0100))
                                lp->stats.collisions++;
                        if (rfd->stat & SWAP16(0x8000))
                                lp->stats.rx_length_errors++;
                        if (rfd->stat & SWAP16(0x0001))
                                lp->stats.rx_over_errors++;
                        if (rfd->stat & SWAP16(0x0002))
                                lp->stats.rx_fifo_errors++;
                        if (rfd->stat & SWAP16(0x0004))
                                lp->stats.rx_frame_errors++;
                        if (rfd->stat & SWAP16(0x0008))
                                lp->stats.rx_crc_errors++;
                        if (rfd->stat & SWAP16(0x0010))
                                lp->stats.rx_length_errors++;
                }

                /* Clear the buffer descriptor count and EOF + F flags */

                if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
                        rbd->count = 0;
                        lp->rbd_head = rbd->v_next;
                        DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
                }

                /* Tidy the frame descriptor, marking it as end of list */

                rfd->rbd = I596_NULL;
                rfd->stat = 0;
                rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
                rfd->count = 0;

                /* Update record of next frame descriptor to process */

                lp->dma->scb.rfd = rfd->b_next;
                lp->rfd_head = rfd->v_next;
                DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

                /* Remove end-of-list from old end descriptor */

                rfd->v_prev->cmd = SWAP16(CMD_FLEX);
                DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
                rfd = lp->rfd_head;
                DMA_INV(dev, rfd, sizeof(struct i596_rfd));
        }

        DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

        return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
        struct i596_cmd *ptr;

        while (lp->cmd_head != NULL) {
                ptr = lp->cmd_head;
                lp->cmd_head = ptr->v_next;
                lp->cmd_backlog--;

                switch (SWAP16(ptr->command) & 0x7) {
                case CmdTx:
                        {
                                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
                                struct sk_buff *skb = tx_cmd->skb;
                                dma_unmap_single(dev->dev.parent,
                                                 tx_cmd->dma_addr,
                                                 skb->len, DMA_TO_DEVICE);

                                dev_kfree_skb(skb);

                                lp->stats.tx_errors++;
                                lp->stats.tx_aborted_errors++;

                                ptr->v_next = NULL;
                                ptr->b_next = I596_NULL;
                                tx_cmd->cmd.command = 0;  /* Mark as free */
                                break;
                        }
                default:
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                }
                DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
        }

        wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
        lp->dma->scb.cmd = I596_NULL;
        DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
        unsigned long flags;

        DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

        spin_lock_irqsave (&lp->lock, flags);

        wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

        netif_stop_queue(dev);

        /* FIXME: this command might cause an lpmc */
        lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
        DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
        ca(dev);

        /* wait for shutdown */
        wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
        spin_unlock_irqrestore (&lp->lock, flags);

        i596_cleanup_cmd(dev, lp);
        i596_rx(dev);

        netif_start_queue(dev);
        init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        unsigned long flags;

        DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
                               lp->cmd_head));

        cmd->status = 0;
        cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
        cmd->v_next = NULL;
        cmd->b_next = I596_NULL;
        DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

        spin_lock_irqsave (&lp->lock, flags);

        if (lp->cmd_head != NULL) {
                lp->cmd_tail->v_next = cmd;
                lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
                DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
        } else {
                lp->cmd_head = cmd;
                wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
                dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
                dma->scb.command = SWAP16(CUC_START);
                DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
                ca(dev);
        }
        lp->cmd_tail = cmd;
        lp->cmd_backlog++;

        spin_unlock_irqrestore (&lp->lock, flags);

        if (lp->cmd_backlog > max_cmd_backlog) {
                unsigned long tickssofar = jiffies - lp->last_cmd;

                if (tickssofar < ticks_limit)
                        return;

                printk(KERN_ERR
                       "%s: command unit timed out, status resetting.\n",
                       dev->name);
#if 1
                i596_reset(dev, lp);
#endif
        }
}

static int i596_open(struct net_device *dev)
{
        DEB(DEB_OPEN, printk(KERN_DEBUG
                             "%s: i596_open() irq %d.\n", dev->name, dev->irq));

        if (init_rx_bufs(dev)) {
                printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
                goto out_remove_rx_bufs;        /* free any partial ring */
        }
        if (init_i596_mem(dev)) {
                printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
                goto out_remove_rx_bufs;
        }
        netif_start_queue(dev);

        return 0;

out_remove_rx_bufs:
        remove_rx_bufs(dev);
        return -EAGAIN;
}

static void i596_tx_timeout (struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);

        /* Transmitter timeout, serious problems. */
        DEB(DEB_ERRORS, printk(KERN_DEBUG
                               "%s: transmit timed out, status resetting.\n",
                               dev->name));

        lp->stats.tx_errors++;

        /* Try to restart the adaptor */
        if (lp->last_restart == lp->stats.tx_packets) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
                /* Shutdown and restart */
                i596_reset (dev, lp);
        } else {
                /* Issue a channel attention signal */
                DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
                lp->dma->scb.command = SWAP16(CUC_START | RX_START);
                DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
                ca (dev);
                lp->last_restart = lp->stats.tx_packets;
        }

        dev->trans_start = jiffies;
        netif_wake_queue (dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct tx_cmd *tx_cmd;
        struct i596_tbd *tbd;
        short length = skb->len;
        dev->trans_start = jiffies;

        DEB(DEB_STARTTX, printk(KERN_DEBUG
                                "%s: i596_start_xmit(%x,%p) called\n",
                                dev->name, skb->len, skb->data));

        if (length < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN))
                        return 0;
                length = ETH_ZLEN;
        }

        netif_stop_queue(dev);

        tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
        tbd = lp->dma->tbds + lp->next_tx_cmd;

        if (tx_cmd->cmd.command) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG
                                       "%s: xmit ring full, dropping packet.\n",
                                       dev->name));
                lp->stats.tx_dropped++;

                dev_kfree_skb(skb);
        } else {
                if (++lp->next_tx_cmd == TX_RING_SIZE)
                        lp->next_tx_cmd = 0;
                tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
                tbd->next = I596_NULL;

                tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
                tx_cmd->skb = skb;

                tx_cmd->pad = 0;
                tx_cmd->size = 0;
                tbd->pad = 0;
                tbd->size = SWAP16(EOF | length);

                tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
                                                  skb->len, DMA_TO_DEVICE);
                tbd->data = SWAP32(tx_cmd->dma_addr);

                DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
                DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
                DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
                i596_add_cmd(dev, &tx_cmd->cmd);

                lp->stats.tx_packets++;
                lp->stats.tx_bytes += length;
        }

        netif_start_queue(dev);

        return 0;
}

static void print_eth(unsigned char *add, char *str)
{
        int i;

        printk(KERN_DEBUG "i596 0x%p, ", add);
        for (i = 0; i < 6; i++)
                printk(" %02X", add[i + 6]);
        printk(" -->");
        for (i = 0; i < 6; i++)
                printk(" %02X", add[i]);
        printk(" %02X%02X, %s\n", add[12], add[13], str);
}

static int __devinit i82596_probe(struct net_device *dev)
{
        int i;
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma;

        /* This lot ensures things have been cache line aligned. */
        BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
        BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
        BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
        BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
        BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

        if (!dev->base_addr || !dev->irq)
                return -ENODEV;

        dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
                sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
        if (!dma) {
                printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
                return -ENOMEM;
        }

        /* The 82596-specific entries in the device structure. */
        dev->open = i596_open;
        dev->stop = i596_close;
        dev->hard_start_xmit = i596_start_xmit;
        dev->get_stats = i596_get_stats;
        dev->set_multicast_list = set_multicast_list;
        dev->tx_timeout = i596_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = i596_poll_controller;
#endif

        memset(dma, 0, sizeof(struct i596_dma));
        lp->dma = dma;

        dma->scb.command = 0;
        dma->scb.cmd = I596_NULL;
        dma->scb.rfd = I596_NULL;
        spin_lock_init(&lp->lock);

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));

        i = register_netdev(dev);
        if (i) {
                DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
                                    (void *)dma, lp->dma_addr);
                return i;
        }

        DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
                              dev->name, dev->base_addr));
        for (i = 0; i < 6; i++)
                DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
        DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
        DEB(DEB_INIT, printk(KERN_INFO
                             "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
                             dev->name, dma, (int)sizeof(struct i596_dma),
                             &dma->scb));

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        i596_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct i596_private *lp;
        struct i596_dma *dma;
        unsigned short status, ack_cmd = 0;

        if (dev == NULL) {
                printk(KERN_WARNING "%s: irq %d for unknown device.\n",
                       __func__, irq);
                return IRQ_NONE;
        }

        lp = netdev_priv(dev);
        dma = lp->dma;

        spin_lock (&lp->lock);

        wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
        status = SWAP16(dma->scb.status);

        DEB(DEB_INTS, printk(KERN_DEBUG
                             "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
                        dev->name, irq, status));

        ack_cmd = status & 0xf000;

        if (!ack_cmd) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG
                                       "%s: interrupt with no events\n",
                                       dev->name));
                spin_unlock (&lp->lock);
                return IRQ_NONE;
        }

        if ((status & 0x8000) || (status & 0x2000)) {
                struct i596_cmd *ptr;

                if ((status & 0x8000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt completed command.\n",
                                   dev->name));
                if ((status & 0x2000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt command unit inactive %x.\n",
                                   dev->name, status & 0x0700));

                while (lp->cmd_head != NULL) {
                        DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
                        if (!(lp->cmd_head->status & SWAP16(STAT_C)))
                                break;

                        ptr = lp->cmd_head;

                        DEB(DEB_STATUS,
                            printk(KERN_DEBUG
                                   "cmd_head->status = %04x, ->command = %04x\n",
                                   SWAP16(lp->cmd_head->status),
                                   SWAP16(lp->cmd_head->command)));
                        lp->cmd_head = ptr->v_next;
                        lp->cmd_backlog--;

                        switch (SWAP16(ptr->command) & 0x7) {
                        case CmdTx:
                            {
                                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
                                struct sk_buff *skb = tx_cmd->skb;

                                if (ptr->status & SWAP16(STAT_OK)) {
                                        DEB(DEB_TXADDR,
                                            print_eth(skb->data, "tx-done"));
                                } else {
                                        lp->stats.tx_errors++;
                                        if (ptr->status & SWAP16(0x0020))
                                                lp->stats.collisions++;
                                        if (!(ptr->status & SWAP16(0x0040)))
                                                lp->stats.tx_heartbeat_errors++;
                                        if (ptr->status & SWAP16(0x0400))
                                                lp->stats.tx_carrier_errors++;
                                        if (ptr->status & SWAP16(0x0800))
                                                lp->stats.collisions++;
                                        if (ptr->status & SWAP16(0x1000))
                                                lp->stats.tx_aborted_errors++;
                                }
                                dma_unmap_single(dev->dev.parent,
                                                 tx_cmd->dma_addr,
                                                 skb->len, DMA_TO_DEVICE);
                                dev_kfree_skb_irq(skb);

                                tx_cmd->cmd.command = 0; /* Mark free */
                                break;
                            }
                        case CmdTDR:
                            {
                                unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

                                if (status & 0x8000) {
                                        DEB(DEB_ANY,
                                            printk(KERN_DEBUG "%s: link ok.\n",
                                                   dev->name));
                                } else {
                                        if (status & 0x4000)
                                                printk(KERN_ERR
                                                       "%s: Transceiver problem.\n",
                                                       dev->name);
                                        if (status & 0x2000)
                                                printk(KERN_ERR
                                                       "%s: Termination problem.\n",
                                                       dev->name);
                                        if (status & 0x1000)
                                                printk(KERN_ERR
                                                       "%s: Short circuit.\n",
                                                       dev->name);

                                        DEB(DEB_TDR,
                                            printk(KERN_DEBUG "%s: Time %d.\n",
                                                   dev->name, status & 0x07ff));
                                }
                                break;
                            }
                        case CmdConfigure:
                                /*
                                 * Zap command so set_multicast_list() knows
                                 * it is free
                                 */
                                ptr->command = 0;
                                break;
                        }
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                        DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
                        lp->last_cmd = jiffies;
                }

                /* This mess arranges that only the last of any outstanding
                 * commands has the interrupt bit set.  Should probably really
                 * only add to the cmd queue when the CU is stopped.
                 */
                ptr = lp->cmd_head;
                while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
                        struct i596_cmd *prev = ptr;

                        ptr->command &= SWAP16(0x1fff);
                        ptr = ptr->v_next;
                        DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
                }

                if (lp->cmd_head != NULL)
                        ack_cmd |= CUC_START;
                dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
                DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
        }
        if ((status & 0x1000) || (status & 0x4000)) {
                if ((status & 0x4000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt received a frame.\n",
                                   dev->name));
                i596_rx(dev);
                /* Only RX_START if stopped - RGH 07-07-96 */
                if (status & 0x1000) {
                        if (netif_running(dev)) {
                                DEB(DEB_ERRORS,
                                    printk(KERN_DEBUG
                                           "%s: i596 interrupt receive unit inactive, status 0x%x\n",
                                           dev->name, status));
                                ack_cmd |= RX_START;
                                lp->stats.rx_errors++;
                                lp->stats.rx_fifo_errors++;
                                rebuild_rx_bufs(dev);
                        }
                }
        }
        wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
        dma->scb.command = SWAP16(ack_cmd);
        DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));

        /* DANGER: I suspect that some kind of interrupt
         * acknowledgement aside from acking the 82596 might be needed
         * here...  but it's running acceptably without */

        ca(dev);

        wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
        DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

        spin_unlock (&lp->lock);
        return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);

        DEB(DEB_INIT,
            printk(KERN_DEBUG
                   "%s: Shutting down ethercard, status was %4.4x.\n",
                   dev->name, SWAP16(lp->dma->scb.status)));

        spin_lock_irqsave(&lp->lock, flags);

        wait_cmd(dev, lp->dma, 100, "close1 timed out");
        lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
        DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));

        ca(dev);

        wait_cmd(dev, lp->dma, 100, "close2 timed out");
        spin_unlock_irqrestore(&lp->lock, flags);
        DEB(DEB_STRUCT, i596_display_data(dev));
        i596_cleanup_cmd(dev, lp);

        free_irq(dev->irq, dev);
        remove_rx_bufs(dev);

        return 0;
}

static struct net_device_stats *i596_get_stats(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);

        return &lp->stats;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int config = 0, cnt;

        DEB(DEB_MULTI,
            printk(KERN_DEBUG
                   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
                   dev->name, dev->mc_count,
                   dev->flags & IFF_PROMISC ? "ON" : "OFF",
                   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

        if ((dev->flags & IFF_PROMISC) &&
            !(dma->cf_cmd.i596_config[8] & 0x01)) {
                dma->cf_cmd.i596_config[8] |= 0x01;
                config = 1;
        }
        if (!(dev->flags & IFF_PROMISC) &&
            (dma->cf_cmd.i596_config[8] & 0x01)) {
                dma->cf_cmd.i596_config[8] &= ~0x01;
                config = 1;
        }
        if ((dev->flags & IFF_ALLMULTI) &&
            (dma->cf_cmd.i596_config[11] & 0x20)) {
                dma->cf_cmd.i596_config[11] &= ~0x20;
                config = 1;
        }
        if (!(dev->flags & IFF_ALLMULTI) &&
            !(dma->cf_cmd.i596_config[11] & 0x20)) {
                dma->cf_cmd.i596_config[11] |= 0x20;
                config = 1;
        }
        if (config) {
                if (dma->cf_cmd.cmd.command)
                        printk(KERN_INFO
                               "%s: config change request already queued\n",
                               dev->name);
                else {
                        dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
                        DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
                        i596_add_cmd(dev, &dma->cf_cmd.cmd);
                }
        }

        cnt = dev->mc_count;
        if (cnt > MAX_MC_CNT) {
                cnt = MAX_MC_CNT;
                printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
                        dev->name, cnt);
        }

        if (dev->mc_count > 0) {
                struct dev_mc_list *dmi;
                unsigned char *cp;
                struct mc_cmd *cmd;

                cmd = &dma->mc_cmd;
                cmd->cmd.command = SWAP16(CmdMulticastList);
                cmd->mc_cnt = SWAP16(cnt * 6);  /* only the clamped count is copied below */
                cp = cmd->mc_addrs;
                for (dmi = dev->mc_list;
                     cnt && dmi != NULL;
                     dmi = dmi->next, cnt--, cp += 6) {
                        memcpy(cp, dmi->dmi_addr, 6);
                        if (i596_debug > 1)
                                DEB(DEB_MULTI,
                                    printk(KERN_DEBUG
                                           "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
                                           dev->name, cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]));
                }
                DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
        }
}