/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/

/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly in to sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.  (A sketch of the SWAP16()/SWAP32() helpers that paper
   over this follows this comment block.)

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */
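
/* SWAP16() and SWAP32() are supplied by the board-specific file that
 * #includes this one; they are not defined here.  As a sketch only (an
 * assumption for illustration, not the authoritative definition), the
 * word swap described above could be written as:
 *
 *      #define SWAP32(x)       (((u32)(x) << 16) | ((u32)(x) >> 16))
 *      #define SWAP16(x)       (x)
 *
 * which stores 0x12345678 as 0x56781234, matching the comment above.
 */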

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>

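/* The DMA_ALLOC()/DMA_FREE(), DMA_WBACK()/DMA_INV()/DMA_WBACK_INV(),
 * SYSBUS, ca() and mpu_port() helpers used below also come from the
 * board-specific wrapper that #includes this file.  A minimal sketch of
 * what a non-coherent platform might provide (an assumption for
 * illustration, not the definitive implementation):
 *
 *      #define DMA_WBACK(ndev, addr, len) \
 *              dma_cache_sync((ndev)->dev.parent, (void *)(addr), \
 *                             (len), DMA_TO_DEVICE)
 *      #define DMA_INV(ndev, addr, len) \
 *              dma_cache_sync((ndev)->dev.parent, (void *)(addr), \
 *                             (len), DMA_FROM_DEVICE)
 */
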
/* DEBUG flags
 */

#define DEB_INIT        0x0001
#define DEB_PROBE       0x0002
#define DEB_SERIOUS     0x0004
#define DEB_ERRORS      0x0008
#define DEB_MULTI       0x0010
#define DEB_TDR         0x0020
#define DEB_OPEN        0x0040
#define DEB_RESET       0x0080
#define DEB_ADDCMD      0x0100
#define DEB_STATUS      0x0200
#define DEB_STARTTX     0x0400
#define DEB_RXADDR      0x0800
#define DEB_TXADDR      0x1000
#define DEB_RXFRAME     0x2000
#define DEB_INTS        0x4000
#define DEB_STRUCT      0x8000
#define DEB_ANY         0xffff


#define DEB(x, y)       do { if (i596_debug & (x)) { y; } } while (0)
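
/* Usage example: DEB(DEB_PROBE, printk(KERN_INFO "probing\n")) emits the
 * printk only when DEB_PROBE is set in i596_debug (default below).
 */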


/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET              0x00    /* reset 82596 */
#define PORT_SELFTEST           0x01    /* selftest */
#define PORT_ALTSCP             0x02    /* alternate SCB address */
#define PORT_ALTDUMP            0x03    /* Alternate DUMP address */

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;
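
/* In i596_rx() below, frames no longer than rx_copybreak are copied into
 * a freshly allocated skb and the ring buffer is reused in place; longer
 * frames are passed up directly and a replacement buffer is mapped in.
 */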

#define PKT_BUF_SZ      1536
#define MAX_MC_CNT      64

#define ISCP_BUSY       0x0001

#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL         0x8000  /* The last command of the list, stop. */
#define CMD_SUSP        0x4000  /* Suspend after doing cmd. */
#define CMD_INTR        0x2000  /* Interrupt after doing cmd. */

#define CMD_FLEX        0x0008  /* Enable flexible memory model */

enum commands {
        CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
        CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C          0x8000  /* Set to 1 when execution completes */
#define STAT_B          0x4000  /* Command being executed */
#define STAT_OK         0x2000  /* Command executed ok */
#define STAT_A          0x1000  /* Command aborted */

#define  CUC_START      0x0100
#define  CUC_RESUME     0x0200
#define  CUC_SUSPEND    0x0300
#define  CUC_ABORT      0x0400
#define  RX_START       0x0010
#define  RX_RESUME      0x0020
#define  RX_SUSPEND     0x0030
#define  RX_ABORT       0x0040

#define TX_TIMEOUT      5


struct i596_reg {
        unsigned short porthi;
        unsigned short portlo;
        u32            ca;
};
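
/* mpu_port() is declared below but implemented by the board-specific
 * wrapper that #includes this file.  The helper below is only an
 * illustrative sketch (its name and direct register writes are
 * assumptions, not the real implementation): a VME-style board would
 * push the 32-bit port command through the register pair, most
 * significant word first, as the MPU_PORT comment above requires.
 */
static inline void example_mpu_port(struct i596_reg *regs, int cmd,
                                    dma_addr_t addr)
{
        u32 v = (u32)addr | cmd;        /* low bits select the PORT_* command */

        regs->porthi = v >> 16;         /* most significant word first */
        regs->portlo = v & 0xffff;
}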

#define EOF             0x8000
#define SIZE_MASK       0x3fff

struct i596_tbd {
        unsigned short size;
        unsigned short pad;
        dma_addr_t     next;
        dma_addr_t     data;
        u32 cache_pad[5];               /* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
        struct i596_cmd *v_next;        /* Address from CPUs viewpoint */
        unsigned short status;
        unsigned short command;
        dma_addr_t     b_next;  /* Address from i596 viewpoint */
};
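
/* Concretely, i596_add_cmd() below links a new command onto the tail with
 *      lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
 * so the chip-visible pointer skips v_next and lands on the status word.
 */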

struct tx_cmd {
        struct i596_cmd cmd;
        dma_addr_t     tbd;
        unsigned short size;
        unsigned short pad;
        struct sk_buff *skb;            /* So we can free it after tx */
        dma_addr_t dma_addr;
#ifdef __LP64__
        u32 cache_pad[6];               /* Total 64 bytes... */
#else
        u32 cache_pad[1];               /* Total 32 bytes... */
#endif
};

struct tdr_cmd {
        struct i596_cmd cmd;
        unsigned short status;
        unsigned short pad;
};

struct mc_cmd {
        struct i596_cmd cmd;
        short mc_cnt;
        char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
        struct i596_cmd cmd;
        char eth_addr[8];
};

struct cf_cmd {
        struct i596_cmd cmd;
        char i596_config[16];
};

struct i596_rfd {
        unsigned short stat;
        unsigned short cmd;
        dma_addr_t     b_next;  /* Address from i596 viewpoint */
        dma_addr_t     rbd;
        unsigned short count;
        unsigned short size;
        struct i596_rfd *v_next;        /* Address from CPUs viewpoint */
        struct i596_rfd *v_prev;
#ifndef __LP64__
        u32 cache_pad[2];               /* Total 32 bytes... */
#endif
};

struct i596_rbd {
    /* hardware data */
    unsigned short count;
    unsigned short zero1;
    dma_addr_t     b_next;
    dma_addr_t     b_data;              /* Address from i596 viewpoint */
    unsigned short size;
    unsigned short zero2;
    /* driver data */
    struct sk_buff *skb;
    struct i596_rbd *v_next;
    dma_addr_t     b_addr;              /* This rbd addr from i596 view */
    unsigned char *v_data;              /* Address from CPUs viewpoint */
                                        /* Total 32 bytes... */
#ifdef __LP64__
    u32 cache_pad[4];
#endif
};

/* These values are chosen so struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16
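
/* Rough arithmetic for a 32-bit build (each ring element above pads out
 * to a 32-byte cache line, as the BUILD_BUG_ON()s in i82596_probe()
 * verify): 16 RFDs + 16 RBDs at 32 bytes each is 1024 bytes, and
 * 32 tx_cmds + 32 TBDs at 32 bytes each is 2048 bytes; adding the
 * 32-byte-aligned scp/iscp/scb and command blocks keeps the total
 * under 4096.
 */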

struct i596_scb {
        unsigned short status;
        unsigned short command;
        dma_addr_t    cmd;
        dma_addr_t    rfd;
        u32           crc_err;
        u32           align_err;
        u32           resource_err;
        u32           over_err;
        u32           rcvdt_err;
        u32           short_err;
        unsigned short t_on;
        unsigned short t_off;
};

struct i596_iscp {
        u32           stat;
        dma_addr_t    scb;
};

struct i596_scp {
        u32           sysbus;
        u32           pad;
        dma_addr_t    iscp;
};

struct i596_dma {
        struct i596_scp scp                     __attribute__((aligned(32)));
        volatile struct i596_iscp iscp          __attribute__((aligned(32)));
        volatile struct i596_scb scb            __attribute__((aligned(32)));
        struct sa_cmd sa_cmd                    __attribute__((aligned(32)));
        struct cf_cmd cf_cmd                    __attribute__((aligned(32)));
        struct tdr_cmd tdr_cmd                  __attribute__((aligned(32)));
        struct mc_cmd mc_cmd                    __attribute__((aligned(32)));
        struct i596_rfd rfds[RX_RING_SIZE]      __attribute__((aligned(32)));
        struct i596_rbd rbds[RX_RING_SIZE]      __attribute__((aligned(32)));
        struct tx_cmd tx_cmds[TX_RING_SIZE]     __attribute__((aligned(32)));
        struct i596_tbd tbds[TX_RING_SIZE]      __attribute__((aligned(32)));
};

struct i596_private {
        struct i596_dma *dma;
        u32    stat;
        int last_restart;
        struct i596_rfd *rfd_head;
        struct i596_rbd *rbd_head;
        struct i596_cmd *cmd_tail;
        struct i596_cmd *cmd_head;
        int cmd_backlog;
        u32    last_cmd;
        int next_tx_cmd;
        int options;
        spinlock_t lock;       /* serialize access to chip */
        dma_addr_t dma_addr;
        void __iomem *mpu_port;
        void __iomem *ca;
};

static const char init_setup[] =
{
        0x8E,           /* length, prefetch on */
        0xC8,           /* fifo to 8, monitor off */
        0x80,           /* don't save bad frames */
        0x2E,           /* No source address insertion, 8 byte preamble */
        0x00,           /* priority and backoff defaults */
        0x60,           /* interframe spacing */
        0x00,           /* slot time LSB */
        0xf2,           /* slot time and retries */
        0x00,           /* promiscuous mode */
        0x00,           /* collision detect */
        0x40,           /* minimum frame length */
        0xff,
        0x00,
        0x7f /*  *multi IA */ };
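
/* The low nibble of byte 0 (0x8E & 0x0f == 14) gives the length of the
 * configuration block, which is why init_i596_mem() below copies exactly
 * 14 bytes of this array into cf_cmd.i596_config before queuing
 * CmdConfigure.
 */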

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif


static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
        DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
        while (--delcnt && dma->iscp.stat) {
                udelay(10);
                DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
        }
        if (!delcnt) {
                printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
                     dev->name, str, SWAP16(dma->iscp.stat));
                return -1;
        } else
                return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
        DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
        while (--delcnt && dma->scb.command) {
                udelay(10);
                DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
        }
        if (!delcnt) {
                printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
                       dev->name, str,
                       SWAP16(dma->scb.status),
                       SWAP16(dma->scb.command));
                return -1;
        } else
                return 0;
}

static void i596_display_data(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        struct i596_cmd *cmd;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
               &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
        printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
               &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
        printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
                " .cmd = %08x, .rfd = %08x\n",
               &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
                SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
        printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
               " over %x, rcvdt %x, short %x\n",
               SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
               SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
               SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
        cmd = lp->cmd_head;
        while (cmd != NULL) {
                printk(KERN_DEBUG
                       "cmd at %p, .status = %04x, .command = %04x,"
                       " .b_next = %08x\n",
                       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
                       SWAP32(cmd->b_next));
                cmd = cmd->v_next;
        }
        rfd = lp->rfd_head;
        printk(KERN_DEBUG "rfd_head = %p\n", rfd);
        do {
                printk(KERN_DEBUG
                       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
                       " count %04x\n",
                       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
                       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
                       SWAP16(rfd->count));
                rfd = rfd->v_next;
        } while (rfd != lp->rfd_head);
        rbd = lp->rbd_head;
        printk(KERN_DEBUG "rbd_head = %p\n", rbd);
        do {
                printk(KERN_DEBUG
                       "   %p .count %04x, b_next %08x, b_data %08x,"
                       " size %04x\n",
                        rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
                       SWAP32(rbd->b_data), SWAP16(rbd->size));
                rbd = rbd->v_next;
        } while (rbd != lp->rbd_head);
        DMA_INV(dev, dma, sizeof(struct i596_dma));
}


#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
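
/* Worked example (addresses invented for illustration): if lp->dma sits
 * at CPU address 0xc0100000 and DMA_ALLOC() returned the bus handle
 * lp->dma_addr == 0x00100000, then virt_to_dma(lp, &lp->dma->scb) yields
 * 0x00100000 + offsetof(struct i596_dma, scb), i.e. the bus address the
 * 82596 must be given to reach the SCB.
 */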

static inline int init_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int i;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        /* First build the Receive Buffer Descriptor List */

        for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
                dma_addr_t dma_addr;
                struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);

                if (skb == NULL)
                        return -1;
                skb_reserve(skb, 2);
                dma_addr = dma_map_single(dev->dev.parent, skb->data,
                                          PKT_BUF_SZ, DMA_FROM_DEVICE);
                rbd->v_next = rbd+1;
                rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
                rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
                rbd->skb = skb;
                rbd->v_data = skb->data;
                rbd->b_data = SWAP32(dma_addr);
                rbd->size = SWAP16(PKT_BUF_SZ);
        }
        lp->rbd_head = dma->rbds;
        rbd = dma->rbds + rx_ring_size - 1;
        rbd->v_next = dma->rbds;
        rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

        /* Now build the Receive Frame Descriptor List */

        for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
                rfd->rbd = I596_NULL;
                rfd->v_next = rfd+1;
                rfd->v_prev = rfd-1;
                rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
                rfd->cmd = SWAP16(CMD_FLEX);
        }
        lp->rfd_head = dma->rfds;
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        rfd = dma->rfds;
        rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
        rfd->v_prev = dma->rfds + rx_ring_size - 1;
        rfd = dma->rfds + rx_ring_size - 1;
        rfd->v_next = dma->rfds;
        rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
        rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
        return 0;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_rbd *rbd;
        int i;

        for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
                if (rbd->skb == NULL)
                        break;
                dma_unmap_single(dev->dev.parent,
                                 (dma_addr_t)SWAP32(rbd->b_data),
                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb(rbd->skb);
                rbd->skb = NULL;        /* don't leave a stale pointer behind */
        }
}


static void rebuild_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int i;

        /* Ensure rx frame/buffer descriptors are tidy */

        for (i = 0; i < rx_ring_size; i++) {
                dma->rfds[i].rbd = I596_NULL;
                dma->rfds[i].cmd = SWAP16(CMD_FLEX);
        }
        dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
        lp->rfd_head = dma->rfds;
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        lp->rbd_head = dma->rbds;
        dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}


static int init_i596_mem(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        unsigned long flags;

        mpu_port(dev, PORT_RESET, 0);
        udelay(100);                    /* Wait 100us - seems to help */

        /* change the scp address */

        lp->last_cmd = jiffies;

        dma->scp.sysbus = SYSBUS;
        dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
        dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
        dma->iscp.stat = SWAP32(ISCP_BUSY);
        lp->cmd_backlog = 0;

        lp->cmd_head = NULL;
        dma->scb.cmd = I596_NULL;

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

        DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
        DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
        ca(dev);
        if (wait_istat(dev, dma, 1000, "initialization timed out"))
                goto failed;
        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: i82596 initialization successful\n",
                             dev->name));

        if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
                printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
                goto failed;
        }

        /* Ensure rx frame/buffer descriptors are tidy */
        rebuild_rx_bufs(dev);

        dma->scb.command = 0;
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: queuing CmdConfigure\n", dev->name));
        memcpy(dma->cf_cmd.i596_config, init_setup, 14);
        dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
        DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
        i596_add_cmd(dev, &dma->cf_cmd.cmd);

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
        memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
        dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
        DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &dma->sa_cmd.cmd);

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
        dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
        DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
        i596_add_cmd(dev, &dma->tdr_cmd.cmd);

        spin_lock_irqsave (&lp->lock, flags);

        if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
                spin_unlock_irqrestore (&lp->lock, flags);
                goto failed_free_irq;
        }
        DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
        dma->scb.command = SWAP16(RX_START);
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        ca(dev);

        spin_unlock_irqrestore (&lp->lock, flags);
        if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
                goto failed_free_irq;
        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: Receive unit started OK\n", dev->name));
        return 0;

failed_free_irq:
        free_irq(dev->irq, dev);
failed:
        printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
        mpu_port(dev, PORT_RESET, 0);
        return -1;
}


static inline int i596_rx(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;
        int frames = 0;

        DEB(DEB_RXFRAME, printk(KERN_DEBUG
                                "i596_rx(), rfd_head %p, rbd_head %p\n",
                                lp->rfd_head, lp->rbd_head));


        rfd = lp->rfd_head;             /* Ref next frame to check */

        DMA_INV(dev, rfd, sizeof(struct i596_rfd));
        while (rfd->stat & SWAP16(STAT_C)) {    /* Loop while complete frames */
                if (rfd->rbd == I596_NULL)
                        rbd = NULL;
                else if (rfd->rbd == lp->rbd_head->b_addr) {
                        rbd = lp->rbd_head;
                        DMA_INV(dev, rbd, sizeof(struct i596_rbd));
                } else {
                        printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
                        /* XXX Now what? */
                        rbd = NULL;
                }
                DEB(DEB_RXFRAME, printk(KERN_DEBUG
                                      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
                                      rfd, rfd->rbd, rfd->stat));

                if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
                        /* a good frame */
                        int pkt_len = SWAP16(rbd->count) & 0x3fff;
                        struct sk_buff *skb = rbd->skb;
                        int rx_in_place = 0;

                        DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
                        frames++;

                        /* Check if the packet is long enough to just accept
                         * without copying to a properly sized skbuff.
                         */

                        if (pkt_len > rx_copybreak) {
                                struct sk_buff *newskb;
                                dma_addr_t dma_addr;

                                dma_unmap_single(dev->dev.parent,
                                                 (dma_addr_t)SWAP32(rbd->b_data),
                                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                                /* Get fresh skbuff to replace filled one. */
                                newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
                                if (newskb == NULL) {
                                        skb = NULL;     /* drop pkt */
                                        goto memory_squeeze;
                                }
                                skb_reserve(newskb, 2);

                                /* Pass up the skb already on the Rx ring. */
                                skb_put(skb, pkt_len);
                                rx_in_place = 1;
                                rbd->skb = newskb;
                                dma_addr = dma_map_single(dev->dev.parent,
                                                          newskb->data,
                                                          PKT_BUF_SZ,
                                                          DMA_FROM_DEVICE);
                                rbd->v_data = newskb->data;
                                rbd->b_data = SWAP32(dma_addr);
                                DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
                        } else
                                skb = netdev_alloc_skb(dev, pkt_len + 2);
memory_squeeze:
                        if (skb == NULL) {
                                /* XXX tulip.c can defer packets here!! */
                                printk(KERN_ERR
                                       "%s: i596_rx Memory squeeze, dropping packet.\n",
                                       dev->name);
                                dev->stats.rx_dropped++;
                        } else {
                                if (!rx_in_place) {
                                        /* 16 byte align the data fields */
                                        dma_sync_single_for_cpu(dev->dev.parent,
                                                                (dma_addr_t)SWAP32(rbd->b_data),
                                                                PKT_BUF_SZ, DMA_FROM_DEVICE);
                                        skb_reserve(skb, 2);
                                        memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
                                        dma_sync_single_for_device(dev->dev.parent,
                                                                   (dma_addr_t)SWAP32(rbd->b_data),
                                                                   PKT_BUF_SZ, DMA_FROM_DEVICE);
                                }
                                skb->len = pkt_len;
                                skb->protocol = eth_type_trans(skb, dev);
                                netif_rx(skb);
                                dev->last_rx = jiffies;
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += pkt_len;
                        }
                } else {
                        DEB(DEB_ERRORS, printk(KERN_DEBUG
                                               "%s: Error, rfd.stat = 0x%04x\n",
                                               dev->name, rfd->stat));
                        dev->stats.rx_errors++;
                        if (rfd->stat & SWAP16(0x0100))
                                dev->stats.collisions++;
                        if (rfd->stat & SWAP16(0x8000))
                                dev->stats.rx_length_errors++;
                        if (rfd->stat & SWAP16(0x0001))
                                dev->stats.rx_over_errors++;
                        if (rfd->stat & SWAP16(0x0002))
                                dev->stats.rx_fifo_errors++;
                        if (rfd->stat & SWAP16(0x0004))
                                dev->stats.rx_frame_errors++;
                        if (rfd->stat & SWAP16(0x0008))
                                dev->stats.rx_crc_errors++;
                        if (rfd->stat & SWAP16(0x0010))
                                dev->stats.rx_length_errors++;
                }

                /* Clear the buffer descriptor count and EOF + F flags */

                if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
                        rbd->count = 0;
                        lp->rbd_head = rbd->v_next;
                        DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
                }

                /* Tidy the frame descriptor, marking it as end of list */

                rfd->rbd = I596_NULL;
                rfd->stat = 0;
                rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
                rfd->count = 0;

                /* Update record of next frame descriptor to process */

                lp->dma->scb.rfd = rfd->b_next;
                lp->rfd_head = rfd->v_next;
                DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

                /* Remove end-of-list from old end descriptor */

                rfd->v_prev->cmd = SWAP16(CMD_FLEX);
                DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
                rfd = lp->rfd_head;
                DMA_INV(dev, rfd, sizeof(struct i596_rfd));
        }

        DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

        return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
        struct i596_cmd *ptr;

        while (lp->cmd_head != NULL) {
                ptr = lp->cmd_head;
                lp->cmd_head = ptr->v_next;
                lp->cmd_backlog--;

                switch (SWAP16(ptr->command) & 0x7) {
                case CmdTx:
                        {
                                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
                                struct sk_buff *skb = tx_cmd->skb;
                                dma_unmap_single(dev->dev.parent,
                                                 tx_cmd->dma_addr,
                                                 skb->len, DMA_TO_DEVICE);

                                dev_kfree_skb(skb);

                                dev->stats.tx_errors++;
                                dev->stats.tx_aborted_errors++;

                                ptr->v_next = NULL;
                                ptr->b_next = I596_NULL;
                                tx_cmd->cmd.command = 0;  /* Mark as free */
                                break;
                        }
                default:
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                }
                DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
        }

        wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
        lp->dma->scb.cmd = I596_NULL;
        DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
        unsigned long flags;

        DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

        spin_lock_irqsave (&lp->lock, flags);

        wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

        netif_stop_queue(dev);

        /* FIXME: this command might cause an lpmc */
        lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
        DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
        ca(dev);

        /* wait for shutdown */
        wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
        spin_unlock_irqrestore (&lp->lock, flags);

        i596_cleanup_cmd(dev, lp);
        i596_rx(dev);

        netif_start_queue(dev);
        init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        unsigned long flags;

        DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
                               lp->cmd_head));

        cmd->status = 0;
        cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
        cmd->v_next = NULL;
        cmd->b_next = I596_NULL;
        DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

        spin_lock_irqsave (&lp->lock, flags);

        if (lp->cmd_head != NULL) {
                lp->cmd_tail->v_next = cmd;
                lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
                DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
        } else {
                lp->cmd_head = cmd;
                wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
                dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
                dma->scb.command = SWAP16(CUC_START);
                DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
                ca(dev);
        }
        lp->cmd_tail = cmd;
        lp->cmd_backlog++;

        spin_unlock_irqrestore (&lp->lock, flags);

        if (lp->cmd_backlog > max_cmd_backlog) {
                unsigned long tickssofar = jiffies - lp->last_cmd;

                if (tickssofar < ticks_limit)
                        return;

                printk(KERN_ERR
                       "%s: command unit timed out, status resetting.\n",
                       dev->name);
#if 1
                i596_reset(dev, lp);
#endif
        }
}

static int i596_open(struct net_device *dev)
{
        DEB(DEB_OPEN, printk(KERN_DEBUG
                             "%s: i596_open() irq %d.\n", dev->name, dev->irq));

        if (init_rx_bufs(dev)) {
                printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
                goto out_remove_rx_bufs;        /* free any buffers we did get */
        }
        if (init_i596_mem(dev)) {
                printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
                goto out_remove_rx_bufs;
        }
        netif_start_queue(dev);

        return 0;

out_remove_rx_bufs:
        remove_rx_bufs(dev);
        return -EAGAIN;
}

static void i596_tx_timeout (struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);

        /* Transmitter timeout, serious problems. */
        DEB(DEB_ERRORS, printk(KERN_DEBUG
                               "%s: transmit timed out, status resetting.\n",
                               dev->name));

        dev->stats.tx_errors++;

        /* Try to restart the adaptor */
        if (lp->last_restart == dev->stats.tx_packets) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
                /* Shutdown and restart */
                i596_reset (dev, lp);
        } else {
                /* Issue a channel attention signal */
                DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
                lp->dma->scb.command = SWAP16(CUC_START | RX_START);
                DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
                ca (dev);
                lp->last_restart = dev->stats.tx_packets;
        }

        dev->trans_start = jiffies;
        netif_wake_queue (dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct tx_cmd *tx_cmd;
        struct i596_tbd *tbd;
        short length = skb->len;
        dev->trans_start = jiffies;

        DEB(DEB_STARTTX, printk(KERN_DEBUG
                                "%s: i596_start_xmit(%x,%p) called\n",
                                dev->name, skb->len, skb->data));

        if (length < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN))
                        return 0;
                length = ETH_ZLEN;
        }

        netif_stop_queue(dev);

        tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
        tbd = lp->dma->tbds + lp->next_tx_cmd;

        if (tx_cmd->cmd.command) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG
                                       "%s: xmit ring full, dropping packet.\n",
                                       dev->name));
                dev->stats.tx_dropped++;

                dev_kfree_skb(skb);
        } else {
                if (++lp->next_tx_cmd == TX_RING_SIZE)
                        lp->next_tx_cmd = 0;
                tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
                tbd->next = I596_NULL;

                tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
                tx_cmd->skb = skb;

                tx_cmd->pad = 0;
                tx_cmd->size = 0;
                tbd->pad = 0;
                tbd->size = SWAP16(EOF | length);

                tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
                                                  skb->len, DMA_TO_DEVICE);
                tbd->data = SWAP32(tx_cmd->dma_addr);

                DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
                DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
                DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
                i596_add_cmd(dev, &tx_cmd->cmd);

                dev->stats.tx_packets++;
                dev->stats.tx_bytes += length;
        }

        netif_start_queue(dev);

        return 0;
}

static void print_eth(unsigned char *add, char *str)
{
        DECLARE_MAC_BUF(mac);
        DECLARE_MAC_BUF(mac2);

        printk(KERN_DEBUG "i596 0x%p, %s --> %s %02X%02X, %s\n",
               add, print_mac(mac, add + 6), print_mac(mac2, add),
               add[12], add[13], str);
}

static int __devinit i82596_probe(struct net_device *dev)
{
        int i;
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma;

        /* This lot ensures things have been cache line aligned. */
        BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
        BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
        BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
        BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
        BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

        if (!dev->base_addr || !dev->irq)
                return -ENODEV;

        dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
                sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
        if (!dma) {
                printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
                return -ENOMEM;
        }

        /* The 82596-specific entries in the device structure. */
        dev->open = i596_open;
        dev->stop = i596_close;
        dev->hard_start_xmit = i596_start_xmit;
        dev->set_multicast_list = set_multicast_list;
        dev->tx_timeout = i596_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = i596_poll_controller;
#endif

        memset(dma, 0, sizeof(struct i596_dma));
        lp->dma = dma;

        dma->scb.command = 0;
        dma->scb.cmd = I596_NULL;
        dma->scb.rfd = I596_NULL;
        spin_lock_init(&lp->lock);

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));

        i = register_netdev(dev);
        if (i) {
                DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
                                    (void *)dma, lp->dma_addr);
                return i;
        }

        DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
                              dev->name, dev->base_addr));
        for (i = 0; i < 6; i++)
                DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
        DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
        DEB(DEB_INIT, printk(KERN_INFO
                             "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
                             dev->name, dma, (int)sizeof(struct i596_dma),
                             &dma->scb));

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        i596_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct i596_private *lp;
        struct i596_dma *dma;
        unsigned short status, ack_cmd = 0;

        if (dev == NULL) {
                printk(KERN_WARNING "%s: irq %d for unknown device.\n",
                       __FUNCTION__, irq);
                return IRQ_NONE;
        }

        lp = netdev_priv(dev);
        dma = lp->dma;

        spin_lock (&lp->lock);

        wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
        status = SWAP16(dma->scb.status);

        DEB(DEB_INTS, printk(KERN_DEBUG
                             "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
                        dev->name, irq, status));

        ack_cmd = status & 0xf000;
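
        /* SCB status bits being acked, per the i82596 SCB layout (names
         * from the datasheet; the decoding below matches): 0x8000 CX
         * (command completed with interrupt), 0x4000 FR (frame received),
         * 0x2000 CNA (command unit left active), 0x1000 RNR (receive unit
         * left ready).
         */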

        if (!ack_cmd) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG
                                       "%s: interrupt with no events\n",
                                       dev->name));
                spin_unlock (&lp->lock);
                return IRQ_NONE;
        }

        if ((status & 0x8000) || (status & 0x2000)) {
                struct i596_cmd *ptr;

                if ((status & 0x8000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt completed command.\n",
                                   dev->name));
                if ((status & 0x2000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt command unit inactive %x.\n",
                                   dev->name, status & 0x0700));

                while (lp->cmd_head != NULL) {
                        DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
                        if (!(lp->cmd_head->status & SWAP16(STAT_C)))
                                break;

                        ptr = lp->cmd_head;

                        DEB(DEB_STATUS,
                            printk(KERN_DEBUG
                                   "cmd_head->status = %04x, ->command = %04x\n",
                                   SWAP16(lp->cmd_head->status),
                                   SWAP16(lp->cmd_head->command)));
                        lp->cmd_head = ptr->v_next;
                        lp->cmd_backlog--;

                        switch (SWAP16(ptr->command) & 0x7) {
                        case CmdTx:
                            {
                                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
                                struct sk_buff *skb = tx_cmd->skb;

                                if (ptr->status & SWAP16(STAT_OK)) {
                                        DEB(DEB_TXADDR,
                                            print_eth(skb->data, "tx-done"));
                                } else {
                                        dev->stats.tx_errors++;
                                        if (ptr->status & SWAP16(0x0020))
                                                dev->stats.collisions++;
                                        if (!(ptr->status & SWAP16(0x0040)))
                                                dev->stats.tx_heartbeat_errors++;
                                        if (ptr->status & SWAP16(0x0400))
                                                dev->stats.tx_carrier_errors++;
                                        if (ptr->status & SWAP16(0x0800))
                                                dev->stats.collisions++;
                                        if (ptr->status & SWAP16(0x1000))
                                                dev->stats.tx_aborted_errors++;
                                }
                                dma_unmap_single(dev->dev.parent,
                                                 tx_cmd->dma_addr,
                                                 skb->len, DMA_TO_DEVICE);
                                dev_kfree_skb_irq(skb);

                                tx_cmd->cmd.command = 0; /* Mark free */
                                break;
                            }
                        case CmdTDR:
                            {
                                unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

                                if (status & 0x8000) {
                                        DEB(DEB_ANY,
                                            printk(KERN_DEBUG "%s: link ok.\n",
                                                   dev->name));
                                } else {
                                        if (status & 0x4000)
                                                printk(KERN_ERR
                                                       "%s: Transceiver problem.\n",
                                                       dev->name);
                                        if (status & 0x2000)
                                                printk(KERN_ERR
                                                       "%s: Termination problem.\n",
                                                       dev->name);
                                        if (status & 0x1000)
                                                printk(KERN_ERR
                                                       "%s: Short circuit.\n",
                                                       dev->name);

                                        DEB(DEB_TDR,
                                            printk(KERN_DEBUG "%s: Time %d.\n",
                                                   dev->name, status & 0x07ff));
                                }
                                break;
                            }
                        case CmdConfigure:
                                /*
                                 * Zap command so set_multicast_list() knows
                                 * it is free
                                 */
                                ptr->command = 0;
                                break;
                        }
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                        DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
                        lp->last_cmd = jiffies;
                }

                /* This mess is arranging that only the last of any outstanding
                 * commands has the interrupt bit set.  Should probably really
                 * only add to the cmd queue when the CU is stopped.
                 */
                ptr = lp->cmd_head;
                while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
                        struct i596_cmd *prev = ptr;

                        ptr->command &= SWAP16(0x1fff);
                        ptr = ptr->v_next;
                        DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
                }

                if (lp->cmd_head != NULL)
                        ack_cmd |= CUC_START;
                dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
                DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
        }
        if ((status & 0x1000) || (status & 0x4000)) {
                if ((status & 0x4000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt received a frame.\n",
                                   dev->name));
                i596_rx(dev);
                /* Only RX_START if stopped - RGH 07-07-96 */
                if (status & 0x1000) {
                        if (netif_running(dev)) {
                                DEB(DEB_ERRORS,
                                    printk(KERN_DEBUG
                                           "%s: i596 interrupt receive unit inactive, status 0x%x\n",
                                           dev->name, status));
                                ack_cmd |= RX_START;
                                dev->stats.rx_errors++;
                                dev->stats.rx_fifo_errors++;
                                rebuild_rx_bufs(dev);
                        }
                }
        }
        wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
        dma->scb.command = SWAP16(ack_cmd);
        DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));

        /* DANGER: I suspect that some kind of interrupt
         acknowledgement aside from acking the 82596 might be needed
         here...  but it's running acceptably without */

        ca(dev);

        wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
        DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

        spin_unlock (&lp->lock);
        return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);

        DEB(DEB_INIT,
            printk(KERN_DEBUG
                   "%s: Shutting down ethercard, status was %4.4x.\n",
                   dev->name, SWAP16(lp->dma->scb.status)));

        spin_lock_irqsave(&lp->lock, flags);

        wait_cmd(dev, lp->dma, 100, "close1 timed out");
        lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
        DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));

        ca(dev);

        wait_cmd(dev, lp->dma, 100, "close2 timed out");
        spin_unlock_irqrestore(&lp->lock, flags);
        DEB(DEB_STRUCT, i596_display_data(dev));
        i596_cleanup_cmd(dev, lp);

        free_irq(dev->irq, dev);
        remove_rx_bufs(dev);

        return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int config = 0, cnt;
        DECLARE_MAC_BUF(mac);

        DEB(DEB_MULTI,
            printk(KERN_DEBUG
                   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
                   dev->name, dev->mc_count,
                   dev->flags & IFF_PROMISC ? "ON" : "OFF",
                   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

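        /* Config byte 8 bit 0x01 enables promiscuous mode.  Config byte 11
         * bit 0x20 has inverted sense: it *disables* multicast reception,
         * so IFF_ALLMULTI clears it and the non-ALLMULTI default (see
         * init_setup byte 11, 0xff) leaves it set.
         */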
        if ((dev->flags & IFF_PROMISC) &&
            !(dma->cf_cmd.i596_config[8] & 0x01)) {
                dma->cf_cmd.i596_config[8] |= 0x01;
                config = 1;
        }
        if (!(dev->flags & IFF_PROMISC) &&
            (dma->cf_cmd.i596_config[8] & 0x01)) {
                dma->cf_cmd.i596_config[8] &= ~0x01;
                config = 1;
        }
        if ((dev->flags & IFF_ALLMULTI) &&
            (dma->cf_cmd.i596_config[11] & 0x20)) {
                dma->cf_cmd.i596_config[11] &= ~0x20;
                config = 1;
        }
        if (!(dev->flags & IFF_ALLMULTI) &&
            !(dma->cf_cmd.i596_config[11] & 0x20)) {
                dma->cf_cmd.i596_config[11] |= 0x20;
                config = 1;
        }
        if (config) {
                if (dma->cf_cmd.cmd.command)
                        printk(KERN_INFO
                               "%s: config change request already queued\n",
                               dev->name);
                else {
                        dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
                        DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
                        i596_add_cmd(dev, &dma->cf_cmd.cmd);
                }
        }

        cnt = dev->mc_count;
        if (cnt > MAX_MC_CNT) {
                cnt = MAX_MC_CNT;
                printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
                        dev->name, cnt);
        }

        if (dev->mc_count > 0) {
                struct dev_mc_list *dmi;
                unsigned char *cp;
                struct mc_cmd *cmd;

                cmd = &dma->mc_cmd;
                cmd->cmd.command = SWAP16(CmdMulticastList);
                cmd->mc_cnt = SWAP16(cnt * 6);
                cp = cmd->mc_addrs;
                for (dmi = dev->mc_list;
                     cnt && dmi != NULL;
                     dmi = dmi->next, cnt--, cp += 6) {
                        memcpy(cp, dmi->dmi_addr, 6);
                        if (i596_debug > 1)
                                DEB(DEB_MULTI,
                                    printk(KERN_DEBUG
                                           "%s: Adding address %s\n",
                                           dev->name, print_mac(mac, cp)));
                }
                DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
        }
}