1 /* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
2 /*
3  *      Copyright 1996-1999 Thomas Bogendoerfer
4  *
5  *      Derived from the lance driver written 1993,1994,1995 by Donald Becker.
6  *
7  *      Copyright 1993 United States Government as represented by the
8  *      Director, National Security Agency.
9  *
10  *      This software may be used and distributed according to the terms
11  *      of the GNU General Public License, incorporated herein by reference.
12  *
13  *      This driver is for PCnet32 and PCnetPCI based ethercards
14  */
15 /**************************************************************************
16  *  23 Oct, 2000.
17  *  Fixed a few bugs, related to running the controller in 32bit mode.
18  *
19  *  Carsten Langgaard, carstenl@mips.com
20  *  Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
21  *
22  *************************************************************************/
23
24 #define DRV_NAME        "pcnet32"
25 #define DRV_VERSION     "1.35"
26 #define DRV_RELDATE     "21.Apr.2008"
27 #define PFX             DRV_NAME ": "
28
29 static const char *const version =
30     DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
31
32 #include <linux/module.h>
33 #include <linux/kernel.h>
34 #include <linux/string.h>
35 #include <linux/errno.h>
36 #include <linux/ioport.h>
37 #include <linux/slab.h>
38 #include <linux/interrupt.h>
39 #include <linux/pci.h>
40 #include <linux/delay.h>
41 #include <linux/init.h>
42 #include <linux/ethtool.h>
43 #include <linux/mii.h>
44 #include <linux/crc32.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/skbuff.h>
48 #include <linux/spinlock.h>
49 #include <linux/moduleparam.h>
50 #include <linux/bitops.h>
51
52 #include <asm/dma.h>
53 #include <asm/io.h>
54 #include <asm/uaccess.h>
55 #include <asm/irq.h>
56
57 /*
58  * PCI device identifiers for "new style" Linux PCI Device Drivers
59  */
60 static struct pci_device_id pcnet32_pci_tbl[] = {
61         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
62         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
63
64         /*
65          * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
66          * the incorrect vendor id.
67          */
68         { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
69           .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
70
71         { }     /* terminate list */
72 };
73
74 MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
75
76 static int cards_found;
77
78 /*
79  * VLB I/O addresses
80  */
81 static unsigned int pcnet32_portlist[] __initdata =
82     { 0x300, 0x320, 0x340, 0x360, 0 };
83
84 static int pcnet32_debug = 0;
85 static int tx_start = 1;        /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
86 static int pcnet32vlb;          /* check for VLB cards ? */
87
88 static struct net_device *pcnet32_dev;
89
90 static int max_interrupt_work = 2;
91 static int rx_copybreak = 200;
92
93 #define PCNET32_PORT_AUI      0x00
94 #define PCNET32_PORT_10BT     0x01
95 #define PCNET32_PORT_GPSI     0x02
96 #define PCNET32_PORT_MII      0x03
97
98 #define PCNET32_PORT_PORTSEL  0x03
99 #define PCNET32_PORT_ASEL     0x04
100 #define PCNET32_PORT_100      0x40
101 #define PCNET32_PORT_FD       0x80
102
103 #define PCNET32_DMA_MASK 0xffffffff
104
105 #define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
106 #define PCNET32_BLINK_TIMEOUT   (jiffies + (HZ/4))
107
108 /*
109  * table to translate option values from tulip
110  * to internal options
111  */
112 static const unsigned char options_mapping[] = {
113         PCNET32_PORT_ASEL,                      /*  0 Auto-select      */
114         PCNET32_PORT_AUI,                       /*  1 BNC/AUI          */
115         PCNET32_PORT_AUI,                       /*  2 AUI/BNC          */
116         PCNET32_PORT_ASEL,                      /*  3 not supported    */
117         PCNET32_PORT_10BT | PCNET32_PORT_FD,    /*  4 10baseT-FD       */
118         PCNET32_PORT_ASEL,                      /*  5 not supported    */
119         PCNET32_PORT_ASEL,                      /*  6 not supported    */
120         PCNET32_PORT_ASEL,                      /*  7 not supported    */
121         PCNET32_PORT_ASEL,                      /*  8 not supported    */
122         PCNET32_PORT_MII,                       /*  9 MII 10baseT      */
123         PCNET32_PORT_MII | PCNET32_PORT_FD,     /* 10 MII 10baseT-FD   */
124         PCNET32_PORT_MII,                       /* 11 MII (autosel)    */
125         PCNET32_PORT_10BT,                      /* 12 10BaseT          */
126         PCNET32_PORT_MII | PCNET32_PORT_100,    /* 13 MII 100BaseTx    */
127                                                 /* 14 MII 100BaseTx-FD */
128         PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
129         PCNET32_PORT_ASEL                       /* 15 not supported    */
130 };
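/*
 * Illustrative note (not part of the original driver): the per-card
 * "options" module parameter declared below simply indexes this table,
 * so, for example, loading the module with
 *
 *      modprobe pcnet32 options=14
 *
 * would request PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
 * i.e. forced MII 100BaseTx full duplex, for the first adapter.
 */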
131
132 static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
133         "Loopback test  (offline)"
134 };
135
136 #define PCNET32_TEST_LEN        ARRAY_SIZE(pcnet32_gstrings_test)
137
138 #define PCNET32_NUM_REGS 136
139
140 #define MAX_UNITS 8             /* More are supported, limit only on options */
141 static int options[MAX_UNITS];
142 static int full_duplex[MAX_UNITS];
143 static int homepna[MAX_UNITS];
144
145 /*
146  *                              Theory of Operation
147  *
148  * This driver uses the same software structure as the normal lance
149  * driver. So look for a verbose description in lance.c. The differences
150  * to the normal lance driver is the use of the 32bit mode of PCnet32
151  * and PCnetPCI chips. Because these chips are 32bit chips, there is no
152  * 16MB limitation and we don't need bounce buffers.
153  */
154
155 /*
156  * Set the number of Tx and Rx buffers, using Log_2(# buffers).
157  * Reasonable default values are 16 Tx buffers and 32 Rx buffers.
158  * That translates to 4 (16 == 2^^4) and 5 (32 == 2^^5).
159  */
160 #ifndef PCNET32_LOG_TX_BUFFERS
161 #define PCNET32_LOG_TX_BUFFERS          4
162 #define PCNET32_LOG_RX_BUFFERS          5
163 #define PCNET32_LOG_MAX_TX_BUFFERS      9       /* 2^9 == 512 */
164 #define PCNET32_LOG_MAX_RX_BUFFERS      9
165 #endif
166
167 #define TX_RING_SIZE            (1 << (PCNET32_LOG_TX_BUFFERS))
168 #define TX_MAX_RING_SIZE        (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
169
170 #define RX_RING_SIZE            (1 << (PCNET32_LOG_RX_BUFFERS))
171 #define RX_MAX_RING_SIZE        (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
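/*
 * In other words, with the default log values of 4 and 5 above, this
 * yields TX_RING_SIZE == 16 and RX_RING_SIZE == 32, with a hard upper
 * limit of 512 descriptors per ring.
 */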
172
173 #define PKT_BUF_SKB             1544
174 /* actual buffer length after being aligned */
175 #define PKT_BUF_SIZE            (PKT_BUF_SKB - NET_IP_ALIGN)
176 /* chip wants twos complement of the (aligned) buffer length */
177 #define NEG_BUF_SIZE            (NET_IP_ALIGN - PKT_BUF_SKB)
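/*
 * Worked example: assuming the usual NET_IP_ALIGN of 2, PKT_BUF_SIZE is
 * 1544 - 2 == 1542 and NEG_BUF_SIZE is -1542, which becomes 0xf9fa when
 * stored in the 16-bit buf_length/length descriptor fields below.
 */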
178
179 /* Offsets from base I/O address. */
180 #define PCNET32_WIO_RDP         0x10
181 #define PCNET32_WIO_RAP         0x12
182 #define PCNET32_WIO_RESET       0x14
183 #define PCNET32_WIO_BDP         0x16
184
185 #define PCNET32_DWIO_RDP        0x10
186 #define PCNET32_DWIO_RAP        0x14
187 #define PCNET32_DWIO_RESET      0x18
188 #define PCNET32_DWIO_BDP        0x1C
189
190 #define PCNET32_TOTAL_SIZE      0x20
191
192 #define CSR0            0
193 #define CSR0_INIT       0x1
194 #define CSR0_START      0x2
195 #define CSR0_STOP       0x4
196 #define CSR0_TXPOLL     0x8
197 #define CSR0_INTEN      0x40
198 #define CSR0_IDON       0x0100
199 #define CSR0_NORMAL     (CSR0_START | CSR0_INTEN)
200 #define PCNET32_INIT_LOW        1
201 #define PCNET32_INIT_HIGH       2
202 #define CSR3            3
203 #define CSR4            4
204 #define CSR5            5
205 #define CSR5_SUSPEND    0x0001
206 #define CSR15           15
207 #define PCNET32_MC_FILTER       8
208
209 #define PCNET32_79C970A 0x2621
210
211 /* The PCNET32 Rx and Tx ring descriptors. */
212 struct pcnet32_rx_head {
213         __le32  base;
214         __le16  buf_length;     /* two's complement of length */
215         __le16  status;
216         __le32  msg_length;
217         __le32  reserved;
218 };
219
220 struct pcnet32_tx_head {
221         __le32  base;
222         __le16  length;         /* two's complement of length */
223         __le16  status;
224         __le32  misc;
225         __le32  reserved;
226 };
227
228 /* The PCNET32 32-Bit initialization block, described in databook. */
229 struct pcnet32_init_block {
230         __le16  mode;
231         __le16  tlen_rlen;
232         u8      phys_addr[6];
233         __le16  reserved;
234         __le32  filter[2];
235         /* Receive and transmit ring base, along with extra bits. */
236         __le32  rx_ring;
237         __le32  tx_ring;
238 };
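/*
 * How tlen_rlen is expected to be filled in (a sketch based on the
 * lp->tx_len_bits / lp->rx_len_bits values maintained by the ring
 * realloc helpers below, where tx_len_bits == size << 12 and
 * rx_len_bits == size << 4):
 *
 *      lp->init_block->tlen_rlen =
 *              cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
 */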
239
240 /* PCnet32 access functions */
241 struct pcnet32_access {
242         u16     (*read_csr) (unsigned long, int);
243         void    (*write_csr) (unsigned long, int, u16);
244         u16     (*read_bcr) (unsigned long, int);
245         void    (*write_bcr) (unsigned long, int, u16);
246         u16     (*read_rap) (unsigned long);
247         void    (*write_rap) (unsigned long, u16);
248         void    (*reset) (unsigned long);
249 };
250
251 /*
252  * The first field of pcnet32_private is read by the ethernet device
253  * so the structure should be allocated using pci_alloc_consistent().
254  */
255 struct pcnet32_private {
256         struct pcnet32_init_block *init_block;
257         /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
258         struct pcnet32_rx_head  *rx_ring;
259         struct pcnet32_tx_head  *tx_ring;
260         dma_addr_t              init_dma_addr;/* DMA address of beginning of the init block,
261                                    returned by pci_alloc_consistent */
262         struct pci_dev          *pci_dev;
263         const char              *name;
264         /* The saved address of a sent-in-place packet/buffer, for skfree(). */
265         struct sk_buff          **tx_skbuff;
266         struct sk_buff          **rx_skbuff;
267         dma_addr_t              *tx_dma_addr;
268         dma_addr_t              *rx_dma_addr;
269         struct pcnet32_access   a;
270         spinlock_t              lock;           /* Guard lock */
271         unsigned int            cur_rx, cur_tx; /* The next free ring entry */
272         unsigned int            rx_ring_size;   /* current rx ring size */
273         unsigned int            tx_ring_size;   /* current tx ring size */
274         unsigned int            rx_mod_mask;    /* rx ring modular mask */
275         unsigned int            tx_mod_mask;    /* tx ring modular mask */
276         unsigned short          rx_len_bits;
277         unsigned short          tx_len_bits;
278         dma_addr_t              rx_ring_dma_addr;
279         dma_addr_t              tx_ring_dma_addr;
280         unsigned int            dirty_rx,       /* ring entries to be freed. */
281                                 dirty_tx;
282
283         struct net_device       *dev;
284         struct napi_struct      napi;
285         char                    tx_full;
286         char                    phycount;       /* number of phys found */
287         int                     options;
288         unsigned int            shared_irq:1,   /* shared irq possible */
289                                 dxsuflo:1,   /* disable transmit stop on uflo */
290                                 mii:1;          /* mii port available */
291         struct net_device       *next;
292         struct mii_if_info      mii_if;
293         struct timer_list       watchdog_timer;
294         struct timer_list       blink_timer;
295         u32                     msg_enable;     /* debug message level */
296
297         /* each bit indicates an available PHY */
298         u32                     phymask;
299         unsigned short          chip_version;   /* which variant this is */
300 };
301
302 static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
303 static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
304 static int pcnet32_open(struct net_device *);
305 static int pcnet32_init_ring(struct net_device *);
306 static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
307 static void pcnet32_tx_timeout(struct net_device *dev);
308 static irqreturn_t pcnet32_interrupt(int, void *);
309 static int pcnet32_close(struct net_device *);
310 static struct net_device_stats *pcnet32_get_stats(struct net_device *);
311 static void pcnet32_load_multicast(struct net_device *dev);
312 static void pcnet32_set_multicast_list(struct net_device *);
313 static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
314 static void pcnet32_watchdog(struct net_device *);
315 static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
316 static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
317                        int val);
318 static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
319 static void pcnet32_ethtool_test(struct net_device *dev,
320                                  struct ethtool_test *eth_test, u64 * data);
321 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
322 static int pcnet32_phys_id(struct net_device *dev, u32 data);
323 static void pcnet32_led_blink_callback(struct net_device *dev);
324 static int pcnet32_get_regs_len(struct net_device *dev);
325 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
326                              void *ptr);
327 static void pcnet32_purge_tx_ring(struct net_device *dev);
328 static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
329 static void pcnet32_free_ring(struct net_device *dev);
330 static void pcnet32_check_media(struct net_device *dev, int verbose);
331
332 static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
333 {
334         outw(index, addr + PCNET32_WIO_RAP);
335         return inw(addr + PCNET32_WIO_RDP);
336 }
337
338 static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
339 {
340         outw(index, addr + PCNET32_WIO_RAP);
341         outw(val, addr + PCNET32_WIO_RDP);
342 }
343
344 static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
345 {
346         outw(index, addr + PCNET32_WIO_RAP);
347         return inw(addr + PCNET32_WIO_BDP);
348 }
349
350 static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
351 {
352         outw(index, addr + PCNET32_WIO_RAP);
353         outw(val, addr + PCNET32_WIO_BDP);
354 }
355
356 static u16 pcnet32_wio_read_rap(unsigned long addr)
357 {
358         return inw(addr + PCNET32_WIO_RAP);
359 }
360
361 static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
362 {
363         outw(val, addr + PCNET32_WIO_RAP);
364 }
365
366 static void pcnet32_wio_reset(unsigned long addr)
367 {
368         inw(addr + PCNET32_WIO_RESET);
369 }
370
371 static int pcnet32_wio_check(unsigned long addr)
372 {
373         outw(88, addr + PCNET32_WIO_RAP);
374         return (inw(addr + PCNET32_WIO_RAP) == 88);
375 }
376
377 static struct pcnet32_access pcnet32_wio = {
378         .read_csr = pcnet32_wio_read_csr,
379         .write_csr = pcnet32_wio_write_csr,
380         .read_bcr = pcnet32_wio_read_bcr,
381         .write_bcr = pcnet32_wio_write_bcr,
382         .read_rap = pcnet32_wio_read_rap,
383         .write_rap = pcnet32_wio_write_rap,
384         .reset = pcnet32_wio_reset
385 };
386
387 static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
388 {
389         outl(index, addr + PCNET32_DWIO_RAP);
390         return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
391 }
392
393 static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
394 {
395         outl(index, addr + PCNET32_DWIO_RAP);
396         outl(val, addr + PCNET32_DWIO_RDP);
397 }
398
399 static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
400 {
401         outl(index, addr + PCNET32_DWIO_RAP);
402         return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
403 }
404
405 static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
406 {
407         outl(index, addr + PCNET32_DWIO_RAP);
408         outl(val, addr + PCNET32_DWIO_BDP);
409 }
410
411 static u16 pcnet32_dwio_read_rap(unsigned long addr)
412 {
413         return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
414 }
415
416 static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
417 {
418         outl(val, addr + PCNET32_DWIO_RAP);
419 }
420
421 static void pcnet32_dwio_reset(unsigned long addr)
422 {
423         inl(addr + PCNET32_DWIO_RESET);
424 }
425
426 static int pcnet32_dwio_check(unsigned long addr)
427 {
428         outl(88, addr + PCNET32_DWIO_RAP);
429         return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
430 }
431
432 static struct pcnet32_access pcnet32_dwio = {
433         .read_csr = pcnet32_dwio_read_csr,
434         .write_csr = pcnet32_dwio_write_csr,
435         .read_bcr = pcnet32_dwio_read_bcr,
436         .write_bcr = pcnet32_dwio_write_bcr,
437         .read_rap = pcnet32_dwio_read_rap,
438         .write_rap = pcnet32_dwio_write_rap,
439         .reset = pcnet32_dwio_reset
440 };
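/*
 * A sketch (under the assumption that probing works the way the two
 * *_check() helpers above suggest) of how a caller can choose between
 * the 16-bit and 32-bit register access methods after a reset:
 *
 *      pcnet32_wio_reset(ioaddr);
 *      if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr))
 *              a = &pcnet32_wio;
 *      else {
 *              pcnet32_dwio_reset(ioaddr);
 *              if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
 *                  pcnet32_dwio_check(ioaddr))
 *                      a = &pcnet32_dwio;
 *      }
 */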
441
442 static void pcnet32_netif_stop(struct net_device *dev)
443 {
444         struct pcnet32_private *lp = netdev_priv(dev);
445
446         dev->trans_start = jiffies;
447         napi_disable(&lp->napi);
448         netif_tx_disable(dev);
449 }
450
451 static void pcnet32_netif_start(struct net_device *dev)
452 {
453         struct pcnet32_private *lp = netdev_priv(dev);
454         ulong ioaddr = dev->base_addr;
455         u16 val;
456
457         netif_wake_queue(dev);
458         val = lp->a.read_csr(ioaddr, CSR3);
459         val &= 0x00ff;
460         lp->a.write_csr(ioaddr, CSR3, val);
461         napi_enable(&lp->napi);
462 }
463
464 /*
465  * Allocate space for the new sized tx ring.
466  * Free old resources
467  * Save new resources.
468  * Any failure keeps old resources.
469  * Must be called with lp->lock held.
470  */
471 static void pcnet32_realloc_tx_ring(struct net_device *dev,
472                                     struct pcnet32_private *lp,
473                                     unsigned int size)
474 {
475         dma_addr_t new_ring_dma_addr;
476         dma_addr_t *new_dma_addr_list;
477         struct pcnet32_tx_head *new_tx_ring;
478         struct sk_buff **new_skb_list;
479
480         pcnet32_purge_tx_ring(dev);
481
482         new_tx_ring = pci_alloc_consistent(lp->pci_dev,
483                                            sizeof(struct pcnet32_tx_head) *
484                                            (1 << size),
485                                            &new_ring_dma_addr);
486         if (new_tx_ring == NULL) {
487                 if (netif_msg_drv(lp))
488                         printk("\n" KERN_ERR
489                                "%s: Consistent memory allocation failed.\n",
490                                dev->name);
491                 return;
492         }
493         memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
494
495         new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
496                                 GFP_ATOMIC);
497         if (!new_dma_addr_list) {
498                 if (netif_msg_drv(lp))
499                         printk("\n" KERN_ERR
500                                "%s: Memory allocation failed.\n", dev->name);
501                 goto free_new_tx_ring;
502         }
503
504         new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
505                                 GFP_ATOMIC);
506         if (!new_skb_list) {
507                 if (netif_msg_drv(lp))
508                         printk("\n" KERN_ERR
509                                "%s: Memory allocation failed.\n", dev->name);
510                 goto free_new_lists;
511         }
512
513         kfree(lp->tx_skbuff);
514         kfree(lp->tx_dma_addr);
515         pci_free_consistent(lp->pci_dev,
516                             sizeof(struct pcnet32_tx_head) *
517                             lp->tx_ring_size, lp->tx_ring,
518                             lp->tx_ring_dma_addr);
519
520         lp->tx_ring_size = (1 << size);
521         lp->tx_mod_mask = lp->tx_ring_size - 1;
522         lp->tx_len_bits = (size << 12);
523         lp->tx_ring = new_tx_ring;
524         lp->tx_ring_dma_addr = new_ring_dma_addr;
525         lp->tx_dma_addr = new_dma_addr_list;
526         lp->tx_skbuff = new_skb_list;
527         return;
528
529     free_new_lists:
530         kfree(new_dma_addr_list);
531     free_new_tx_ring:
532         pci_free_consistent(lp->pci_dev,
533                             sizeof(struct pcnet32_tx_head) *
534                             (1 << size),
535                             new_tx_ring,
536                             new_ring_dma_addr);
537         return;
538 }
539
540 /*
541  * Allocate space for the new sized rx ring.
542  * Re-use old receive buffers.
543  *   alloc extra buffers
544  *   free unneeded buffers
546  * Save new resources.
547  * Any failure keeps old resources.
548  * Must be called with lp->lock held.
549  */
550 static void pcnet32_realloc_rx_ring(struct net_device *dev,
551                                     struct pcnet32_private *lp,
552                                     unsigned int size)
553 {
554         dma_addr_t new_ring_dma_addr;
555         dma_addr_t *new_dma_addr_list;
556         struct pcnet32_rx_head *new_rx_ring;
557         struct sk_buff **new_skb_list;
558         int new, overlap;
559
560         new_rx_ring = pci_alloc_consistent(lp->pci_dev,
561                                            sizeof(struct pcnet32_rx_head) *
562                                            (1 << size),
563                                            &new_ring_dma_addr);
564         if (new_rx_ring == NULL) {
565                 if (netif_msg_drv(lp))
566                         printk("\n" KERN_ERR
567                                "%s: Consistent memory allocation failed.\n",
568                                dev->name);
569                 return;
570         }
571         memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
572
573         new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
574                                 GFP_ATOMIC);
575         if (!new_dma_addr_list) {
576                 if (netif_msg_drv(lp))
577                         printk("\n" KERN_ERR
578                                "%s: Memory allocation failed.\n", dev->name);
579                 goto free_new_rx_ring;
580         }
581
582         new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
583                                 GFP_ATOMIC);
584         if (!new_skb_list) {
585                 if (netif_msg_drv(lp))
586                         printk("\n" KERN_ERR
587                                "%s: Memory allocation failed.\n", dev->name);
588                 goto free_new_lists;
589         }
590
591         /* first copy the current receive buffers */
592         overlap = min(1U << size, lp->rx_ring_size);
593         for (new = 0; new < overlap; new++) {
594                 new_rx_ring[new] = lp->rx_ring[new];
595                 new_dma_addr_list[new] = lp->rx_dma_addr[new];
596                 new_skb_list[new] = lp->rx_skbuff[new];
597         }
598         /* now allocate any new buffers needed */
599         for (; new < (1 << size); new++) {
600                 struct sk_buff *rx_skbuff;
601                 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
602                 if (!(rx_skbuff = new_skb_list[new])) {
603                         /* keep the original lists and buffers */
604                         if (netif_msg_drv(lp))
605                                 printk(KERN_ERR
606                                        "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
607                                        dev->name);
608                         goto free_all_new;
609                 }
610                 skb_reserve(rx_skbuff, NET_IP_ALIGN);
611
612                 new_dma_addr_list[new] =
613                             pci_map_single(lp->pci_dev, rx_skbuff->data,
614                                            PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
615                 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
616                 new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
617                 new_rx_ring[new].status = cpu_to_le16(0x8000);
618         }
619         /* and free any unneeded buffers */
620         for (; new < lp->rx_ring_size; new++) {
621                 if (lp->rx_skbuff[new]) {
622                         pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
623                                          PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
624                         dev_kfree_skb(lp->rx_skbuff[new]);
625                 }
626         }
627
628         kfree(lp->rx_skbuff);
629         kfree(lp->rx_dma_addr);
630         pci_free_consistent(lp->pci_dev,
631                             sizeof(struct pcnet32_rx_head) *
632                             lp->rx_ring_size, lp->rx_ring,
633                             lp->rx_ring_dma_addr);
634
635         lp->rx_ring_size = (1 << size);
636         lp->rx_mod_mask = lp->rx_ring_size - 1;
637         lp->rx_len_bits = (size << 4);
638         lp->rx_ring = new_rx_ring;
639         lp->rx_ring_dma_addr = new_ring_dma_addr;
640         lp->rx_dma_addr = new_dma_addr_list;
641         lp->rx_skbuff = new_skb_list;
642         return;
643
644     free_all_new:
645         for (; --new >= lp->rx_ring_size; ) {
646                 if (new_skb_list[new]) {
647                         pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
648                                          PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
649                         dev_kfree_skb(new_skb_list[new]);
650                 }
651         }
652         kfree(new_skb_list);
653     free_new_lists:
654         kfree(new_dma_addr_list);
655     free_new_rx_ring:
656         pci_free_consistent(lp->pci_dev,
657                             sizeof(struct pcnet32_rx_head) *
658                             (1 << size),
659                             new_rx_ring,
660                             new_ring_dma_addr);
661         return;
662 }
663
664 static void pcnet32_purge_rx_ring(struct net_device *dev)
665 {
666         struct pcnet32_private *lp = netdev_priv(dev);
667         int i;
668
669         /* free all allocated skbuffs */
670         for (i = 0; i < lp->rx_ring_size; i++) {
671                 lp->rx_ring[i].status = 0;      /* CPU owns buffer */
672                 wmb();          /* Make sure adapter sees owner change */
673                 if (lp->rx_skbuff[i]) {
674                         pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
675                                          PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
676                         dev_kfree_skb_any(lp->rx_skbuff[i]);
677                 }
678                 lp->rx_skbuff[i] = NULL;
679                 lp->rx_dma_addr[i] = 0;
680         }
681 }
682
683 #ifdef CONFIG_NET_POLL_CONTROLLER
684 static void pcnet32_poll_controller(struct net_device *dev)
685 {
686         disable_irq(dev->irq);
687         pcnet32_interrupt(0, dev);
688         enable_irq(dev->irq);
689 }
690 #endif
691
692 static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
693 {
694         struct pcnet32_private *lp = netdev_priv(dev);
695         unsigned long flags;
696         int r = -EOPNOTSUPP;
697
698         if (lp->mii) {
699                 spin_lock_irqsave(&lp->lock, flags);
700                 mii_ethtool_gset(&lp->mii_if, cmd);
701                 spin_unlock_irqrestore(&lp->lock, flags);
702                 r = 0;
703         }
704         return r;
705 }
706
707 static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
708 {
709         struct pcnet32_private *lp = netdev_priv(dev);
710         unsigned long flags;
711         int r = -EOPNOTSUPP;
712
713         if (lp->mii) {
714                 spin_lock_irqsave(&lp->lock, flags);
715                 r = mii_ethtool_sset(&lp->mii_if, cmd);
716                 spin_unlock_irqrestore(&lp->lock, flags);
717         }
718         return r;
719 }
720
721 static void pcnet32_get_drvinfo(struct net_device *dev,
722                                 struct ethtool_drvinfo *info)
723 {
724         struct pcnet32_private *lp = netdev_priv(dev);
725
726         strcpy(info->driver, DRV_NAME);
727         strcpy(info->version, DRV_VERSION);
728         if (lp->pci_dev)
729                 strcpy(info->bus_info, pci_name(lp->pci_dev));
730         else
731                 sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
732 }
733
734 static u32 pcnet32_get_link(struct net_device *dev)
735 {
736         struct pcnet32_private *lp = netdev_priv(dev);
737         unsigned long flags;
738         int r;
739
740         spin_lock_irqsave(&lp->lock, flags);
741         if (lp->mii) {
742                 r = mii_link_ok(&lp->mii_if);
743         } else if (lp->chip_version >= PCNET32_79C970A) {
744                 ulong ioaddr = dev->base_addr;  /* card base I/O address */
745                 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
746         } else {        /* can not detect link on really old chips */
747                 r = 1;
748         }
749         spin_unlock_irqrestore(&lp->lock, flags);
750
751         return r;
752 }
753
754 static u32 pcnet32_get_msglevel(struct net_device *dev)
755 {
756         struct pcnet32_private *lp = netdev_priv(dev);
757         return lp->msg_enable;
758 }
759
760 static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
761 {
762         struct pcnet32_private *lp = netdev_priv(dev);
763         lp->msg_enable = value;
764 }
765
766 static int pcnet32_nway_reset(struct net_device *dev)
767 {
768         struct pcnet32_private *lp = netdev_priv(dev);
769         unsigned long flags;
770         int r = -EOPNOTSUPP;
771
772         if (lp->mii) {
773                 spin_lock_irqsave(&lp->lock, flags);
774                 r = mii_nway_restart(&lp->mii_if);
775                 spin_unlock_irqrestore(&lp->lock, flags);
776         }
777         return r;
778 }
779
780 static void pcnet32_get_ringparam(struct net_device *dev,
781                                   struct ethtool_ringparam *ering)
782 {
783         struct pcnet32_private *lp = netdev_priv(dev);
784
785         ering->tx_max_pending = TX_MAX_RING_SIZE;
786         ering->tx_pending = lp->tx_ring_size;
787         ering->rx_max_pending = RX_MAX_RING_SIZE;
788         ering->rx_pending = lp->rx_ring_size;
789 }
790
791 static int pcnet32_set_ringparam(struct net_device *dev,
792                                  struct ethtool_ringparam *ering)
793 {
794         struct pcnet32_private *lp = netdev_priv(dev);
795         unsigned long flags;
796         unsigned int size;
797         ulong ioaddr = dev->base_addr;
798         int i;
799
800         if (ering->rx_mini_pending || ering->rx_jumbo_pending)
801                 return -EINVAL;
802
803         if (netif_running(dev))
804                 pcnet32_netif_stop(dev);
805
806         spin_lock_irqsave(&lp->lock, flags);
807         lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* stop the chip */
808
809         size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
810
811         /* set the minimum ring size to 4, to allow the loopback test to work
812          * unchanged.
813          */
814         for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
815                 if (size <= (1 << i))
816                         break;
817         }
818         if ((1 << i) != lp->tx_ring_size)
819                 pcnet32_realloc_tx_ring(dev, lp, i);
820
821         size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
822         for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
823                 if (size <= (1 << i))
824                         break;
825         }
826         if ((1 << i) != lp->rx_ring_size)
827                 pcnet32_realloc_rx_ring(dev, lp, i);
828
829         lp->napi.weight = lp->rx_ring_size / 2;
830
831         if (netif_running(dev)) {
832                 pcnet32_netif_start(dev);
833                 pcnet32_restart(dev, CSR0_NORMAL);
834         }
835
836         spin_unlock_irqrestore(&lp->lock, flags);
837
838         if (netif_msg_drv(lp))
839                 printk(KERN_INFO
840                        "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
841                        lp->rx_ring_size, lp->tx_ring_size);
842
843         return 0;
844 }
845
846 static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
847                                 u8 * data)
848 {
849         memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
850 }
851
852 static int pcnet32_get_sset_count(struct net_device *dev, int sset)
853 {
854         switch (sset) {
855         case ETH_SS_TEST:
856                 return PCNET32_TEST_LEN;
857         default:
858                 return -EOPNOTSUPP;
859         }
860 }
861
862 static void pcnet32_ethtool_test(struct net_device *dev,
863                                  struct ethtool_test *test, u64 * data)
864 {
865         struct pcnet32_private *lp = netdev_priv(dev);
866         int rc;
867
868         if (test->flags == ETH_TEST_FL_OFFLINE) {
869                 rc = pcnet32_loopback_test(dev, data);
870                 if (rc) {
871                         if (netif_msg_hw(lp))
872                                 printk(KERN_DEBUG "%s: Loopback test failed.\n",
873                                        dev->name);
874                         test->flags |= ETH_TEST_FL_FAILED;
875                 } else if (netif_msg_hw(lp))
876                         printk(KERN_DEBUG "%s: Loopback test passed.\n",
877                                dev->name);
878         } else if (netif_msg_hw(lp))
879                 printk(KERN_DEBUG
880                        "%s: No tests to run (specify 'Offline' on ethtool).\n",
881                        dev->name);
882 }                               /* end pcnet32_ethtool_test */
883
884 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
885 {
886         struct pcnet32_private *lp = netdev_priv(dev);
887         struct pcnet32_access *a = &lp->a;      /* access to registers */
888         ulong ioaddr = dev->base_addr;  /* card base I/O address */
889         struct sk_buff *skb;    /* sk buff */
890         int x, i;               /* counters */
891         int numbuffs = 4;       /* number of TX/RX buffers and descs */
892         u16 status = 0x8300;    /* TX ring status */
893         __le16 teststatus;      /* test of ring status */
894         int rc;                 /* return code */
895         int size;               /* size of packets */
896         unsigned char *packet;  /* source packet data */
897         static const int data_len = 60; /* length of source packets */
898         unsigned long flags;
899         unsigned long ticks;
900
901         rc = 1;                 /* default to fail */
902
903         if (netif_running(dev))
904                 pcnet32_netif_stop(dev);
905
906         spin_lock_irqsave(&lp->lock, flags);
907         lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* stop the chip */
908
909         numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
910
911         /* Reset the PCNET32 */
912         lp->a.reset(ioaddr);
913         lp->a.write_csr(ioaddr, CSR4, 0x0915);  /* auto tx pad */
914
915         /* switch pcnet32 to 32bit mode */
916         lp->a.write_bcr(ioaddr, 20, 2);
917
918         /* purge & init rings but don't actually restart */
919         pcnet32_restart(dev, 0x0000);
920
921         lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* Set STOP bit */
922
923         /* Initialize Transmit buffers. */
924         size = data_len + 15;
925         for (x = 0; x < numbuffs; x++) {
926                 if (!(skb = dev_alloc_skb(size))) {
927                         if (netif_msg_hw(lp))
928                                 printk(KERN_DEBUG
929                                        "%s: Cannot allocate skb at line: %d!\n",
930                                        dev->name, __LINE__);
931                         goto clean_up;
932                 } else {
933                         packet = skb->data;
934                         skb_put(skb, size);     /* create space for data */
935                         lp->tx_skbuff[x] = skb;
936                         lp->tx_ring[x].length = cpu_to_le16(-skb->len);
937                         lp->tx_ring[x].misc = 0;
938
939                         /* put DA and SA into the skb */
940                         for (i = 0; i < 6; i++)
941                                 *packet++ = dev->dev_addr[i];
942                         for (i = 0; i < 6; i++)
943                                 *packet++ = dev->dev_addr[i];
944                         /* type */
945                         *packet++ = 0x08;
946                         *packet++ = 0x06;
947                         /* packet number */
948                         *packet++ = x;
949                         /* fill packet with data */
950                         for (i = 0; i < data_len; i++)
951                                 *packet++ = i;
952
953                         lp->tx_dma_addr[x] =
954                             pci_map_single(lp->pci_dev, skb->data, skb->len,
955                                            PCI_DMA_TODEVICE);
956                         lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
957                         wmb();  /* Make sure owner changes after all others are visible */
958                         lp->tx_ring[x].status = cpu_to_le16(status);
959                 }
960         }
961
962         x = a->read_bcr(ioaddr, 32);    /* set internal loopback in BCR32 */
963         a->write_bcr(ioaddr, 32, x | 0x0002);
964
965         /* set int loopback in CSR15 */
966         x = a->read_csr(ioaddr, CSR15) & 0xfffc;
967         lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
968
969         teststatus = cpu_to_le16(0x8000);
970         lp->a.write_csr(ioaddr, CSR0, CSR0_START);      /* Set STRT bit */
971
972         /* Check status of descriptors */
973         for (x = 0; x < numbuffs; x++) {
974                 ticks = 0;
975                 rmb();
976                 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
977                         spin_unlock_irqrestore(&lp->lock, flags);
978                         msleep(1);
979                         spin_lock_irqsave(&lp->lock, flags);
980                         rmb();
981                         ticks++;
982                 }
983                 if (ticks == 200) {
984                         if (netif_msg_hw(lp))
985                                 printk("%s: Desc %d failed to reset!\n",
986                                        dev->name, x);
987                         break;
988                 }
989         }
990
991         lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* Set STOP bit */
992         wmb();
993         if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
994                 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
995
996                 for (x = 0; x < numbuffs; x++) {
997                         printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
998                         skb = lp->rx_skbuff[x];
999                         for (i = 0; i < size; i++) {
1000                                 printk("%02x ", *(skb->data + i));
1001                         }
1002                         printk("\n");
1003                 }
1004         }
1005
1006         x = 0;
1007         rc = 0;
1008         while (x < numbuffs && !rc) {
1009                 skb = lp->rx_skbuff[x];
1010                 packet = lp->tx_skbuff[x]->data;
1011                 for (i = 0; i < size; i++) {
1012                         if (*(skb->data + i) != packet[i]) {
1013                                 if (netif_msg_hw(lp))
1014                                         printk(KERN_DEBUG
1015                                                "%s: Error in compare! %2x - %02x %02x\n",
1016                                                dev->name, i, *(skb->data + i),
1017                                                packet[i]);
1018                                 rc = 1;
1019                                 break;
1020                         }
1021                 }
1022                 x++;
1023         }
1024
1025       clean_up:
1026         *data1 = rc;
1027         pcnet32_purge_tx_ring(dev);
1028
1029         x = a->read_csr(ioaddr, CSR15);
1030         a->write_csr(ioaddr, CSR15, (x & ~0x0044));     /* reset bits 6 and 2 */
1031
1032         x = a->read_bcr(ioaddr, 32);    /* reset internal loopback */
1033         a->write_bcr(ioaddr, 32, (x & ~0x0002));
1034
1035         if (netif_running(dev)) {
1036                 pcnet32_netif_start(dev);
1037                 pcnet32_restart(dev, CSR0_NORMAL);
1038         } else {
1039                 pcnet32_purge_rx_ring(dev);
1040                 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
1041         }
1042         spin_unlock_irqrestore(&lp->lock, flags);
1043
1044         return (rc);
1045 }                               /* end pcnet32_loopback_test  */
1046
1047 static void pcnet32_led_blink_callback(struct net_device *dev)
1048 {
1049         struct pcnet32_private *lp = netdev_priv(dev);
1050         struct pcnet32_access *a = &lp->a;
1051         ulong ioaddr = dev->base_addr;
1052         unsigned long flags;
1053         int i;
1054
1055         spin_lock_irqsave(&lp->lock, flags);
1056         for (i = 4; i < 8; i++) {
1057                 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
1058         }
1059         spin_unlock_irqrestore(&lp->lock, flags);
1060
1061         mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
1062 }
1063
1064 static int pcnet32_phys_id(struct net_device *dev, u32 data)
1065 {
1066         struct pcnet32_private *lp = netdev_priv(dev);
1067         struct pcnet32_access *a = &lp->a;
1068         ulong ioaddr = dev->base_addr;
1069         unsigned long flags;
1070         int i, regs[4];
1071
1072         if (!lp->blink_timer.function) {
1073                 init_timer(&lp->blink_timer);
1074                 lp->blink_timer.function = (void *)pcnet32_led_blink_callback;
1075                 lp->blink_timer.data = (unsigned long)dev;
1076         }
1077
1078         /* Save the current value of the bcrs */
1079         spin_lock_irqsave(&lp->lock, flags);
1080         for (i = 4; i < 8; i++) {
1081                 regs[i - 4] = a->read_bcr(ioaddr, i);
1082         }
1083         spin_unlock_irqrestore(&lp->lock, flags);
1084
1085         mod_timer(&lp->blink_timer, jiffies);
1086         set_current_state(TASK_INTERRUPTIBLE);
1087
1088         /* AV: the limit here makes no sense whatsoever */
1089         if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
1090                 data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
1091
1092         msleep_interruptible(data * 1000);
1093         del_timer_sync(&lp->blink_timer);
1094
1095         /* Restore the original value of the bcrs */
1096         spin_lock_irqsave(&lp->lock, flags);
1097         for (i = 4; i < 8; i++) {
1098                 a->write_bcr(ioaddr, i, regs[i - 4]);
1099         }
1100         spin_unlock_irqrestore(&lp->lock, flags);
1101
1102         return 0;
1103 }
1104
1105 /*
1106  * lp->lock must be held.
1107  */
1108 static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
1109                 int can_sleep)
1110 {
1111         int csr5;
1112         struct pcnet32_private *lp = netdev_priv(dev);
1113         struct pcnet32_access *a = &lp->a;
1114         ulong ioaddr = dev->base_addr;
1115         int ticks;
1116
1117         /* really old chips have to be stopped. */
1118         if (lp->chip_version < PCNET32_79C970A)
1119                 return 0;
1120
1121         /* set SUSPEND (SPND) - CSR5 bit 0 */
1122         csr5 = a->read_csr(ioaddr, CSR5);
1123         a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
1124
1125         /* poll waiting for bit to be set */
1126         ticks = 0;
1127         while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
1128                 spin_unlock_irqrestore(&lp->lock, *flags);
1129                 if (can_sleep)
1130                         msleep(1);
1131                 else
1132                         mdelay(1);
1133                 spin_lock_irqsave(&lp->lock, *flags);
1134                 ticks++;
1135                 if (ticks > 200) {
1136                         if (netif_msg_hw(lp))
1137                                 printk(KERN_DEBUG
1138                                        "%s: Error getting into suspend!\n",
1139                                        dev->name);
1140                         return 0;
1141                 }
1142         }
1143         return 1;
1144 }
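/*
 * A sketch of the expected caller pattern (assuming the resume side is
 * done by clearing CSR5_SUSPEND again, as pcnet32_get_regs() does):
 *
 *      spin_lock_irqsave(&lp->lock, flags);
 *      if (pcnet32_suspend(dev, &flags, 1)) {
 *              ... poke registers while the MAC is suspended ...
 *              csr5 = lp->a.read_csr(ioaddr, CSR5);
 *              lp->a.write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND);
 *      }
 *      spin_unlock_irqrestore(&lp->lock, flags);
 */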
1145
1146 /*
1147  * process one receive descriptor entry
1148  */
1149
1150 static void pcnet32_rx_entry(struct net_device *dev,
1151                              struct pcnet32_private *lp,
1152                              struct pcnet32_rx_head *rxp,
1153                              int entry)
1154 {
1155         int status = (short)le16_to_cpu(rxp->status) >> 8;
1156         int rx_in_place = 0;
1157         struct sk_buff *skb;
1158         short pkt_len;
1159
1160         if (status != 0x03) {   /* There was an error. */
1161                 /*
1162                  * There is a tricky error noted by John Murphy,
1163                  * <murf@perftech.com> to Russ Nelson: Even with full-sized
1164                  * buffers it's possible for a jabber packet to use two
1165                  * buffers, with only the last correctly noting the error.
1166                  */
1167                 if (status & 0x01)      /* Only count a general error at the */
1168                         dev->stats.rx_errors++; /* end of a packet. */
1169                 if (status & 0x20)
1170                         dev->stats.rx_frame_errors++;
1171                 if (status & 0x10)
1172                         dev->stats.rx_over_errors++;
1173                 if (status & 0x08)
1174                         dev->stats.rx_crc_errors++;
1175                 if (status & 0x04)
1176                         dev->stats.rx_fifo_errors++;
1177                 return;
1178         }
1179
1180         pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
1181
1182         /* Discard oversize frames. */
1183         if (unlikely(pkt_len > PKT_BUF_SIZE)) {
1184                 if (netif_msg_drv(lp))
1185                         printk(KERN_ERR "%s: Impossible packet size %d!\n",
1186                                dev->name, pkt_len);
1187                 dev->stats.rx_errors++;
1188                 return;
1189         }
1190         if (pkt_len < 60) {
1191                 if (netif_msg_rx_err(lp))
1192                         printk(KERN_ERR "%s: Runt packet!\n", dev->name);
1193                 dev->stats.rx_errors++;
1194                 return;
1195         }
1196
1197         if (pkt_len > rx_copybreak) {
1198                 struct sk_buff *newskb;
1199
1200                 if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) {
1201                         skb_reserve(newskb, NET_IP_ALIGN);
1202                         skb = lp->rx_skbuff[entry];
1203                         pci_unmap_single(lp->pci_dev,
1204                                          lp->rx_dma_addr[entry],
1205                                          PKT_BUF_SIZE,
1206                                          PCI_DMA_FROMDEVICE);
1207                         skb_put(skb, pkt_len);
1208                         lp->rx_skbuff[entry] = newskb;
1209                         lp->rx_dma_addr[entry] =
1210                                             pci_map_single(lp->pci_dev,
1211                                                            newskb->data,
1212                                                            PKT_BUF_SIZE,
1213                                                            PCI_DMA_FROMDEVICE);
1214                         rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
1215                         rx_in_place = 1;
1216                 } else
1217                         skb = NULL;
1218         } else {
1219                 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1220         }
1221
1222         if (skb == NULL) {
1223                 if (netif_msg_drv(lp))
1224                         printk(KERN_ERR
1225                                "%s: Memory squeeze, dropping packet.\n",
1226                                dev->name);
1227                 dev->stats.rx_dropped++;
1228                 return;
1229         }
1230         skb->dev = dev;
1231         if (!rx_in_place) {
1232                 skb_reserve(skb, NET_IP_ALIGN);
1233                 skb_put(skb, pkt_len);  /* Make room */
1234                 pci_dma_sync_single_for_cpu(lp->pci_dev,
1235                                             lp->rx_dma_addr[entry],
1236                                             pkt_len,
1237                                             PCI_DMA_FROMDEVICE);
1238                 skb_copy_to_linear_data(skb,
1239                                  (unsigned char *)(lp->rx_skbuff[entry]->data),
1240                                  pkt_len);
1241                 pci_dma_sync_single_for_device(lp->pci_dev,
1242                                                lp->rx_dma_addr[entry],
1243                                                pkt_len,
1244                                                PCI_DMA_FROMDEVICE);
1245         }
1246         dev->stats.rx_bytes += skb->len;
1247         skb->protocol = eth_type_trans(skb, dev);
1248         netif_receive_skb(skb);
1249         dev->last_rx = jiffies;
1250         dev->stats.rx_packets++;
1251         return;
1252 }
1253
1254 static int pcnet32_rx(struct net_device *dev, int budget)
1255 {
1256         struct pcnet32_private *lp = netdev_priv(dev);
1257         int entry = lp->cur_rx & lp->rx_mod_mask;
1258         struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
1259         int npackets = 0;
1260
1261         /* If we own the next entry, it's a new packet. Send it up. */
1262         while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
1263                 pcnet32_rx_entry(dev, lp, rxp, entry);
1264                 npackets += 1;
1265                 /*
1266                  * The docs say that the buffer length isn't touched, but Andrew
1267                  * Boyd of QNX reports that some revs of the 79C965 clear it.
1268                  */
1269                 rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
1270                 wmb();  /* Make sure owner changes after others are visible */
1271                 rxp->status = cpu_to_le16(0x8000);
1272                 entry = (++lp->cur_rx) & lp->rx_mod_mask;
1273                 rxp = &lp->rx_ring[entry];
1274         }
1275
1276         return npackets;
1277 }
1278
1279 static int pcnet32_tx(struct net_device *dev)
1280 {
1281         struct pcnet32_private *lp = netdev_priv(dev);
1282         unsigned int dirty_tx = lp->dirty_tx;
1283         int delta;
1284         int must_restart = 0;
1285
1286         while (dirty_tx != lp->cur_tx) {
1287                 int entry = dirty_tx & lp->tx_mod_mask;
1288                 int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
1289
1290                 if (status < 0)
1291                         break;  /* It still hasn't been Txed */
1292
1293                 lp->tx_ring[entry].base = 0;
1294
1295                 if (status & 0x4000) {
1296                         /* There was a major error, log it. */
1297                         int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
1298                         dev->stats.tx_errors++;
1299                         if (netif_msg_tx_err(lp))
1300                                 printk(KERN_ERR
1301                                        "%s: Tx error status=%04x err_status=%08x\n",
1302                                        dev->name, status,
1303                                        err_status);
1304                         if (err_status & 0x04000000)
1305                                 dev->stats.tx_aborted_errors++;
1306                         if (err_status & 0x08000000)
1307                                 dev->stats.tx_carrier_errors++;
1308                         if (err_status & 0x10000000)
1309                                 dev->stats.tx_window_errors++;
1310 #ifndef DO_DXSUFLO
1311                         if (err_status & 0x40000000) {
1312                                 dev->stats.tx_fifo_errors++;
1313                                 /* Ackk!  On FIFO errors the Tx unit is turned off! */
1314                                 /* Remove this verbosity later! */
1315                                 if (netif_msg_tx_err(lp))
1316                                         printk(KERN_ERR
1317                                                "%s: Tx FIFO error!\n",
1318                                                dev->name);
1319                                 must_restart = 1;
1320                         }
1321 #else
1322                         if (err_status & 0x40000000) {
1323                                 dev->stats.tx_fifo_errors++;
1324                                 if (!lp->dxsuflo) {     /* If controller doesn't recover ... */
1325                                         /* Ackk!  On FIFO errors the Tx unit is turned off! */
1326                                         /* Remove this verbosity later! */
1327                                         if (netif_msg_tx_err(lp))
1328                                                 printk(KERN_ERR
1329                                                        "%s: Tx FIFO error!\n",
1330                                                        dev->name);
1331                                         must_restart = 1;
1332                                 }
1333                         }
1334 #endif
1335                 } else {
1336                         if (status & 0x1800)
1337                                 dev->stats.collisions++;
1338                         dev->stats.tx_packets++;
1339                 }
1340
1341                 /* We must free the original skb */
1342                 if (lp->tx_skbuff[entry]) {
1343                         pci_unmap_single(lp->pci_dev,
1344                                          lp->tx_dma_addr[entry],
1345                                          lp->tx_skbuff[entry]->
1346                                          len, PCI_DMA_TODEVICE);
1347                         dev_kfree_skb_any(lp->tx_skbuff[entry]);
1348                         lp->tx_skbuff[entry] = NULL;
1349                         lp->tx_dma_addr[entry] = 0;
1350                 }
1351                 dirty_tx++;
1352         }
1353
1354         delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
1355         if (delta > lp->tx_ring_size) {
1356                 if (netif_msg_drv(lp))
1357                         printk(KERN_ERR
1358                                "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1359                                dev->name, dirty_tx, lp->cur_tx,
1360                                lp->tx_full);
1361                 dirty_tx += lp->tx_ring_size;
1362                 delta -= lp->tx_ring_size;
1363         }
1364
1365         if (lp->tx_full &&
1366             netif_queue_stopped(dev) &&
1367             delta < lp->tx_ring_size - 2) {
1368                 /* The ring is no longer full, clear tbusy. */
1369                 lp->tx_full = 0;
1370                 netif_wake_queue(dev);
1371         }
1372         lp->dirty_tx = dirty_tx;
1373
1374         return must_restart;
1375 }
1376
1377 static int pcnet32_poll(struct napi_struct *napi, int budget)
1378 {
1379         struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
1380         struct net_device *dev = lp->dev;
1381         unsigned long ioaddr = dev->base_addr;
1382         unsigned long flags;
1383         int work_done;
1384         u16 val;
1385
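        /*
         * NAPI poll: receive at most 'budget' packets, then reclaim finished
         * Tx descriptors.  Interrupts are re-enabled below only when the
         * budget was not exhausted and the poll is complete.
         */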
1386         work_done = pcnet32_rx(dev, budget);
1387
1388         spin_lock_irqsave(&lp->lock, flags);
1389         if (pcnet32_tx(dev)) {
1390                 /* reset the chip to clear the error condition, then restart */
1391                 lp->a.reset(ioaddr);
1392                 lp->a.write_csr(ioaddr, CSR4, 0x0915);  /* auto tx pad */
1393                 pcnet32_restart(dev, CSR0_START);
1394                 netif_wake_queue(dev);
1395         }
1396         spin_unlock_irqrestore(&lp->lock, flags);
1397
1398         if (work_done < budget) {
1399                 spin_lock_irqsave(&lp->lock, flags);
1400
1401                 __netif_rx_complete(dev, napi);
1402
1403                 /* clear interrupt masks */
1404                 val = lp->a.read_csr(ioaddr, CSR3);
1405                 val &= 0x00ff;
1406                 lp->a.write_csr(ioaddr, CSR3, val);
1407
1408                 /* Set interrupt enable. */
1409                 lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
1410                 mmiowb();
1411                 spin_unlock_irqrestore(&lp->lock, flags);
1412         }
1413         return work_done;
1414 }
1415
1416 #define PCNET32_REGS_PER_PHY    32
1417 #define PCNET32_MAX_PHYS        32
1418 static int pcnet32_get_regs_len(struct net_device *dev)
1419 {
1420         struct pcnet32_private *lp = netdev_priv(dev);
1421         int j = lp->phycount * PCNET32_REGS_PER_PHY;
1422
1423         return ((PCNET32_NUM_REGS + j) * sizeof(u16));
1424 }
1425
1426 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1427                              void *ptr)
1428 {
1429         int i, csr0;
1430         u16 *buff = ptr;
1431         struct pcnet32_private *lp = netdev_priv(dev);
1432         struct pcnet32_access *a = &lp->a;
1433         ulong ioaddr = dev->base_addr;
1434         unsigned long flags;
1435
1436         spin_lock_irqsave(&lp->lock, flags);
1437
1438         csr0 = a->read_csr(ioaddr, CSR0);
1439         if (!(csr0 & CSR0_STOP))        /* If not stopped */
1440                 pcnet32_suspend(dev, &flags, 1);
1441
1442         /* read address PROM */
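        /* the station address PROM occupies the first 16 bytes of the I/O
         * region, so it can be read here with plain inw() accesses */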
1443         for (i = 0; i < 16; i += 2)
1444                 *buff++ = inw(ioaddr + i);
1445
1446         /* read control and status registers */
1447         for (i = 0; i < 90; i++) {
1448                 *buff++ = a->read_csr(ioaddr, i);
1449         }
1450
1451         *buff++ = a->read_csr(ioaddr, 112);
1452         *buff++ = a->read_csr(ioaddr, 114);
1453
1454         /* read bus configuration registers */
1455         for (i = 0; i < 30; i++) {
1456                 *buff++ = a->read_bcr(ioaddr, i);
1457         }
1458         *buff++ = 0;            /* skip bcr30 so as not to hang 79C976 */
1459         for (i = 31; i < 36; i++) {
1460                 *buff++ = a->read_bcr(ioaddr, i);
1461         }
1462
1463         /* read mii phy registers */
1464         if (lp->mii) {
1465                 int j;
1466                 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
1467                         if (lp->phymask & (1 << j)) {
1468                                 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
1469                                         lp->a.write_bcr(ioaddr, 33,
1470                                                         (j << 5) | i);
1471                                         *buff++ = lp->a.read_bcr(ioaddr, 34);
1472                                 }
1473                         }
1474                 }
1475         }
1476
1477         if (!(csr0 & CSR0_STOP)) {      /* If not stopped */
1478                 int csr5;
1479
1480                 /* clear SUSPEND (SPND) - CSR5 bit 0 */
1481                 csr5 = a->read_csr(ioaddr, CSR5);
1482                 a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
1483         }
1484
1485         spin_unlock_irqrestore(&lp->lock, flags);
1486 }
1487
1488 static const struct ethtool_ops pcnet32_ethtool_ops = {
1489         .get_settings           = pcnet32_get_settings,
1490         .set_settings           = pcnet32_set_settings,
1491         .get_drvinfo            = pcnet32_get_drvinfo,
1492         .get_msglevel           = pcnet32_get_msglevel,
1493         .set_msglevel           = pcnet32_set_msglevel,
1494         .nway_reset             = pcnet32_nway_reset,
1495         .get_link               = pcnet32_get_link,
1496         .get_ringparam          = pcnet32_get_ringparam,
1497         .set_ringparam          = pcnet32_set_ringparam,
1498         .get_strings            = pcnet32_get_strings,
1499         .self_test              = pcnet32_ethtool_test,
1500         .phys_id                = pcnet32_phys_id,
1501         .get_regs_len           = pcnet32_get_regs_len,
1502         .get_regs               = pcnet32_get_regs,
1503         .get_sset_count         = pcnet32_get_sset_count,
1504 };
1505
1506 /* only probes for non-PCI devices, the rest are handled by
1507  * pci_register_driver via pcnet32_probe_pci */
1508
1509 static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
1510 {
1511         unsigned int *port, ioaddr;
1512
1513         /* search for PCnet32 VLB cards at known addresses */
1514         for (port = pcnet32_portlist; (ioaddr = *port); port++) {
1515                 if (request_region
1516                     (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
1517                         /* check if there is really a pcnet chip on that ioaddr */
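                        /* the last two bytes of the address PROM carry the
                         * ASCII signature 'W' 'W' (0x57) on PCnet parts */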
1518                         if ((inb(ioaddr + 14) == 0x57)
1519                             && (inb(ioaddr + 15) == 0x57)) {
1520                                 pcnet32_probe1(ioaddr, 0, NULL);
1521                         } else {
1522                                 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1523                         }
1524                 }
1525         }
1526 }
1527
1528 static int __devinit
1529 pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1530 {
1531         unsigned long ioaddr;
1532         int err;
1533
1534         err = pci_enable_device(pdev);
1535         if (err < 0) {
1536                 if (pcnet32_debug & NETIF_MSG_PROBE)
1537                         printk(KERN_ERR PFX
1538                                "failed to enable device -- err=%d\n", err);
1539                 return err;
1540         }
1541         pci_set_master(pdev);
1542
1543         ioaddr = pci_resource_start(pdev, 0);
1544         if (!ioaddr) {
1545                 if (pcnet32_debug & NETIF_MSG_PROBE)
1546                         printk(KERN_ERR PFX
1547                                "card has no PCI IO resources, aborting\n");
1548                 return -ENODEV;
1549         }
1550
1551         if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
1552                 if (pcnet32_debug & NETIF_MSG_PROBE)
1553                         printk(KERN_ERR PFX
1554                                "architecture does not support 32bit PCI busmaster DMA\n");
1555                 return -ENODEV;
1556         }
1557         if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
1558             NULL) {
1559                 if (pcnet32_debug & NETIF_MSG_PROBE)
1560                         printk(KERN_ERR PFX
1561                                "io address range already allocated\n");
1562                 return -EBUSY;
1563         }
1564
1565         err = pcnet32_probe1(ioaddr, 1, pdev);
1566         if (err < 0) {
1567                 pci_disable_device(pdev);
1568         }
1569         return err;
1570 }
1571
1572 /* pcnet32_probe1
1573  *  Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
1574  *  pdev will be NULL when called from pcnet32_probe_vlbus.
1575  */
1576 static int __devinit
1577 pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1578 {
1579         struct pcnet32_private *lp;
1580         int i, media;
1581         int fdx, mii, fset, dxsuflo;
1582         int chip_version;
1583         char *chipname;
1584         struct net_device *dev;
1585         struct pcnet32_access *a = NULL;
1586         u8 promaddr[6];
1587         int ret = -ENODEV;
1588
1589         /* reset the chip */
1590         pcnet32_wio_reset(ioaddr);
1591
1592         /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
1593         if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
1594                 a = &pcnet32_wio;
1595         } else {
1596                 pcnet32_dwio_reset(ioaddr);
1597                 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
1598                     && pcnet32_dwio_check(ioaddr)) {
1599                         a = &pcnet32_dwio;
1600                 } else
1601                         goto err_release_region;
1602         }
1603
1604         chip_version =
1605             a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
1606         if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
1607                 printk(KERN_INFO "  PCnet chip version is %#x.\n",
1608                        chip_version);
1609         if ((chip_version & 0xfff) != 0x003) {
1610                 if (pcnet32_debug & NETIF_MSG_PROBE)
1611                         printk(KERN_INFO PFX "Unsupported chip version.\n");
1612                 goto err_release_region;
1613         }
1614
1615         /* initialize variables */
1616         fdx = mii = fset = dxsuflo = 0;
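        /*
         * The low 12 bits (checked above to be 0x003, AMD's manufacturer ID)
         * are discarded; the shift leaves the part-number field used in the
         * switch below.
         */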
1617         chip_version = (chip_version >> 12) & 0xffff;
1618
1619         switch (chip_version) {
1620         case 0x2420:
1621                 chipname = "PCnet/PCI 79C970";  /* PCI */
1622                 break;
1623         case 0x2430:
1624                 if (shared)
1625                         chipname = "PCnet/PCI 79C970";  /* 970 gives the wrong chip id back */
1626                 else
1627                         chipname = "PCnet/32 79C965";   /* 486/VL bus */
1628                 break;
1629         case 0x2621:
1630                 chipname = "PCnet/PCI II 79C970A";      /* PCI */
1631                 fdx = 1;
1632                 break;
1633         case 0x2623:
1634                 chipname = "PCnet/FAST 79C971"; /* PCI */
1635                 fdx = 1;
1636                 mii = 1;
1637                 fset = 1;
1638                 break;
1639         case 0x2624:
1640                 chipname = "PCnet/FAST+ 79C972";        /* PCI */
1641                 fdx = 1;
1642                 mii = 1;
1643                 fset = 1;
1644                 break;
1645         case 0x2625:
1646                 chipname = "PCnet/FAST III 79C973";     /* PCI */
1647                 fdx = 1;
1648                 mii = 1;
1649                 break;
1650         case 0x2626:
1651                 chipname = "PCnet/Home 79C978"; /* PCI */
1652                 fdx = 1;
1653                 /*
1654                  * This is based on specs published at www.amd.com.  This section
1655                  * assumes that a card with a 79C978 wants to go into standard
1656                  * ethernet mode.  The 79C978 can also go into 1Mb HomePNA mode,
1657                  * and the module option homepna=1 can select this instead.
1658                  */
1659                 media = a->read_bcr(ioaddr, 49);
1660                 media &= ~3;    /* default to 10Mb ethernet */
1661                 if (cards_found < MAX_UNITS && homepna[cards_found])
1662                         media |= 1;     /* switch to home wiring mode */
1663                 if (pcnet32_debug & NETIF_MSG_PROBE)
1664                         printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
1665                                (media & 1) ? "1" : "10");
1666                 a->write_bcr(ioaddr, 49, media);
1667                 break;
1668         case 0x2627:
1669                 chipname = "PCnet/FAST III 79C975";     /* PCI */
1670                 fdx = 1;
1671                 mii = 1;
1672                 break;
1673         case 0x2628:
1674                 chipname = "PCnet/PRO 79C976";
1675                 fdx = 1;
1676                 mii = 1;
1677                 break;
1678         default:
1679                 if (pcnet32_debug & NETIF_MSG_PROBE)
1680                         printk(KERN_INFO PFX
1681                                "PCnet version %#x, no PCnet32 chip.\n",
1682                                chip_version);
1683                 goto err_release_region;
1684         }
1685
1686         /*
1687          *  On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
1688          *  starting until the packet is loaded. Strike one for reliability, lose
1689  *  one for latency - although on PCI this isn't a big loss. Older chips
1690  *  have FIFOs smaller than a packet, so you can't do this.
1691          *  Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
1692          */
1693
1694         if (fset) {
1695                 a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
1696                 a->write_csr(ioaddr, 80,
1697                              (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
1698                 dxsuflo = 1;
1699         }
1700
1701         dev = alloc_etherdev(sizeof(*lp));
1702         if (!dev) {
1703                 if (pcnet32_debug & NETIF_MSG_PROBE)
1704                         printk(KERN_ERR PFX "Memory allocation failed.\n");
1705                 ret = -ENOMEM;
1706                 goto err_release_region;
1707         }
1708         SET_NETDEV_DEV(dev, &pdev->dev);
1709
1710         if (pcnet32_debug & NETIF_MSG_PROBE)
1711                 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
1712
1713         /* In most chips, after a chip reset, the ethernet address is read from the
1714          * station address PROM at the base address and programmed into the
1715          * "Physical Address Registers" CSR12-14.
1716          * As a precautionary measure, we read the PROM values and complain if
1717          * they disagree with the CSRs.  If they miscompare, and the PROM addr
1718          * is valid, then the PROM addr is used.
1719          */
1720         for (i = 0; i < 3; i++) {
1721                 unsigned int val;
1722                 val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
1723                 /* There may be endianness issues here. */
1724                 dev->dev_addr[2 * i] = val & 0x0ff;
1725                 dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
1726         }
1727
1728         /* read PROM address and compare with CSR address */
1729         for (i = 0; i < 6; i++)
1730                 promaddr[i] = inb(ioaddr + i);
1731
1732         if (memcmp(promaddr, dev->dev_addr, 6)
1733             || !is_valid_ether_addr(dev->dev_addr)) {
1734                 if (is_valid_ether_addr(promaddr)) {
1735                         if (pcnet32_debug & NETIF_MSG_PROBE) {
1736                                 printk(" warning: CSR address invalid,\n");
1737                                 printk(KERN_INFO
1738                                        "    using instead PROM address of");
1739                         }
1740                         memcpy(dev->dev_addr, promaddr, 6);
1741                 }
1742         }
1743         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1744
1745         /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1746         if (!is_valid_ether_addr(dev->perm_addr))
1747                 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1748
1749         if (pcnet32_debug & NETIF_MSG_PROBE) {
1750                 DECLARE_MAC_BUF(mac);
1751                 printk(" %s", print_mac(mac, dev->dev_addr));
1752
1753                 /* Version 0x2623 and 0x2624 */
1754                 if (((chip_version + 1) & 0xfffe) == 0x2624) {
1755                         i = a->read_csr(ioaddr, 80) & 0x0C00;   /* Check tx_start_pt */
1756                         printk("\n" KERN_INFO "    tx_start_pt(0x%04x):", i);
1757                         switch (i >> 10) {
1758                         case 0:
1759                                 printk("  20 bytes,");
1760                                 break;
1761                         case 1:
1762                                 printk("  64 bytes,");
1763                                 break;
1764                         case 2:
1765                                 printk(" 128 bytes,");
1766                                 break;
1767                         case 3:
1768                                 printk("~220 bytes,");
1769                                 break;
1770                         }
1771                         i = a->read_bcr(ioaddr, 18);    /* Check Burst/Bus control */
1772                         printk(" BCR18(%x):", i & 0xffff);
1773                         if (i & (1 << 5))
1774                                 printk("BurstWrEn ");
1775                         if (i & (1 << 6))
1776                                 printk("BurstRdEn ");
1777                         if (i & (1 << 7))
1778                                 printk("DWordIO ");
1779                         if (i & (1 << 11))
1780                                 printk("NoUFlow ");
1781                         i = a->read_bcr(ioaddr, 25);
1782                         printk("\n" KERN_INFO "    SRAMSIZE=0x%04x,", i << 8);
1783                         i = a->read_bcr(ioaddr, 26);
1784                         printk(" SRAM_BND=0x%04x,", i << 8);
1785                         i = a->read_bcr(ioaddr, 27);
1786                         if (i & (1 << 14))
1787                                 printk("LowLatRx");
1788                 }
1789         }
1790
1791         dev->base_addr = ioaddr;
1792         lp = netdev_priv(dev);
1793         /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
1794         if ((lp->init_block =
1795              pci_alloc_consistent(pdev, sizeof(*lp->init_block), &lp->init_dma_addr)) == NULL) {
1796                 if (pcnet32_debug & NETIF_MSG_PROBE)
1797                         printk(KERN_ERR PFX
1798                                "Consistent memory allocation failed.\n");
1799                 ret = -ENOMEM;
1800                 goto err_free_netdev;
1801         }
1802         lp->pci_dev = pdev;
1803
1804         lp->dev = dev;
1805
1806         spin_lock_init(&lp->lock);
1807
1808         SET_NETDEV_DEV(dev, &pdev->dev);
1809         lp->name = chipname;
1810         lp->shared_irq = shared;
1811         lp->tx_ring_size = TX_RING_SIZE;        /* default tx ring size */
1812         lp->rx_ring_size = RX_RING_SIZE;        /* default rx ring size */
1813         lp->tx_mod_mask = lp->tx_ring_size - 1;
1814         lp->rx_mod_mask = lp->rx_ring_size - 1;
1815         lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
1816         lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
1817         lp->mii_if.full_duplex = fdx;
1818         lp->mii_if.phy_id_mask = 0x1f;
1819         lp->mii_if.reg_num_mask = 0x1f;
1820         lp->dxsuflo = dxsuflo;
1821         lp->mii = mii;
1822         lp->chip_version = chip_version;
1823         lp->msg_enable = pcnet32_debug;
1824         if ((cards_found >= MAX_UNITS)
1825             || (options[cards_found] > sizeof(options_mapping)))
1826                 lp->options = PCNET32_PORT_ASEL;
1827         else
1828                 lp->options = options_mapping[options[cards_found]];
1829         lp->mii_if.dev = dev;
1830         lp->mii_if.mdio_read = mdio_read;
1831         lp->mii_if.mdio_write = mdio_write;
1832
1833         /* napi.weight is used in both the napi and non-napi cases */
1834         lp->napi.weight = lp->rx_ring_size / 2;
1835
1836         netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
1837
1838         if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1839             ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
1840                 lp->options |= PCNET32_PORT_FD;
1841
1842         if (!a) {
1843                 if (pcnet32_debug & NETIF_MSG_PROBE)
1844                         printk(KERN_ERR PFX "No access methods\n");
1845                 ret = -ENODEV;
1846                 goto err_free_consistent;
1847         }
1848         lp->a = *a;
1849
1850         /* prior to register_netdev, dev->name is not yet correct */
1851         if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
1852                 ret = -ENOMEM;
1853                 goto err_free_ring;
1854         }
1855         /* detect special T1/E1 WAN card by checking its MAC address prefix (00:e0:75) */
1856         if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
1857             && dev->dev_addr[2] == 0x75)
1858                 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
1859
1860         lp->init_block->mode = cpu_to_le16(0x0003);     /* Disable Rx and Tx. */
1861         lp->init_block->tlen_rlen =
1862             cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
1863         for (i = 0; i < 6; i++)
1864                 lp->init_block->phys_addr[i] = dev->dev_addr[i];
1865         lp->init_block->filter[0] = 0x00000000;
1866         lp->init_block->filter[1] = 0x00000000;
1867         lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
1868         lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
1869
1870         /* switch pcnet32 to 32bit mode */
1871         a->write_bcr(ioaddr, 20, 2);
1872
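        /* CSR1/CSR2 hold the low and high 16 bits of the init block address,
         * which the chip fetches when the INIT command is issued */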
1873         a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
1874         a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
1875
1876         if (pdev) {             /* use the IRQ provided by PCI */
1877                 dev->irq = pdev->irq;
1878                 if (pcnet32_debug & NETIF_MSG_PROBE)
1879                         printk(" assigned IRQ %d.\n", dev->irq);
1880         } else {
1881                 unsigned long irq_mask = probe_irq_on();
1882
1883                 /*
1884                  * To auto-IRQ we enable the initialization-done and DMA error
1885                  * interrupts. For ISA boards we get a DMA error, but VLB and PCI
1886                  * boards will work.
1887                  */
1888                 /* Trigger an initialization just for the interrupt. */
1889                 a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
1890                 mdelay(1);
1891
1892                 dev->irq = probe_irq_off(irq_mask);
1893                 if (!dev->irq) {
1894                         if (pcnet32_debug & NETIF_MSG_PROBE)
1895                                 printk(", failed to detect IRQ line.\n");
1896                         ret = -ENODEV;
1897                         goto err_free_ring;
1898                 }
1899                 if (pcnet32_debug & NETIF_MSG_PROBE)
1900                         printk(", probed IRQ %d.\n", dev->irq);
1901         }
1902
1903         /* Set the mii phy_id so that we can query the link state */
1904         if (lp->mii) {
1905                 /* lp->phycount and lp->phymask start out as 0 (the private area is zeroed by alloc_etherdev) */
1906
1907                 lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
1908                 /* scan for PHYs */
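                /* probe all 32 MII addresses; a missing PHY reads back 0xffff
                 * in its ID registers and is skipped */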
1909                 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1910                         unsigned short id1, id2;
1911
1912                         id1 = mdio_read(dev, i, MII_PHYSID1);
1913                         if (id1 == 0xffff)
1914                                 continue;
1915                         id2 = mdio_read(dev, i, MII_PHYSID2);
1916                         if (id2 == 0xffff)
1917                                 continue;
1918                         if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
1919                                 continue;       /* 79C971 & 79C972 have phantom phy at id 31 */
1920                         lp->phycount++;
1921                         lp->phymask |= (1 << i);
1922                         lp->mii_if.phy_id = i;
1923                         if (pcnet32_debug & NETIF_MSG_PROBE)
1924                                 printk(KERN_INFO PFX
1925                                        "Found PHY %04x:%04x at address %d.\n",
1926                                        id1, id2, i);
1927                 }
1928                 lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
1929                 if (lp->phycount > 1) {
1930                         lp->options |= PCNET32_PORT_MII;
1931                 }
1932         }
1933
1934         init_timer(&lp->watchdog_timer);
1935         lp->watchdog_timer.data = (unsigned long)dev;
1936         lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
1937
1938         /* The PCNET32-specific entries in the device structure. */
1939         dev->open = &pcnet32_open;
1940         dev->hard_start_xmit = &pcnet32_start_xmit;
1941         dev->stop = &pcnet32_close;
1942         dev->get_stats = &pcnet32_get_stats;
1943         dev->set_multicast_list = &pcnet32_set_multicast_list;
1944         dev->do_ioctl = &pcnet32_ioctl;
1945         dev->ethtool_ops = &pcnet32_ethtool_ops;
1946         dev->tx_timeout = pcnet32_tx_timeout;
1947         dev->watchdog_timeo = (5 * HZ);
1948
1949 #ifdef CONFIG_NET_POLL_CONTROLLER
1950         dev->poll_controller = pcnet32_poll_controller;
1951 #endif
1952
1953         /* Fill in the generic fields of the device structure. */
1954         if (register_netdev(dev))
1955                 goto err_free_ring;
1956
1957         if (pdev) {
1958                 pci_set_drvdata(pdev, dev);
1959         } else {
1960                 lp->next = pcnet32_dev;
1961                 pcnet32_dev = dev;
1962         }
1963
1964         if (pcnet32_debug & NETIF_MSG_PROBE)
1965                 printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
1966         cards_found++;
1967
1968         /* enable LED writes */
1969         a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
1970
1971         return 0;
1972
1973       err_free_ring:
1974         pcnet32_free_ring(dev);
1975       err_free_consistent:
1976         pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
1977                             lp->init_block, lp->init_dma_addr);
1978       err_free_netdev:
1979         free_netdev(dev);
1980       err_release_region:
1981         release_region(ioaddr, PCNET32_TOTAL_SIZE);
1982         return ret;
1983 }
1984
1985 /* if any allocation fails, caller must also call pcnet32_free_ring */
1986 static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
1987 {
1988         struct pcnet32_private *lp = netdev_priv(dev);
1989
1990         lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
1991                                            sizeof(struct pcnet32_tx_head) *
1992                                            lp->tx_ring_size,
1993                                            &lp->tx_ring_dma_addr);
1994         if (lp->tx_ring == NULL) {
1995                 if (netif_msg_drv(lp))
1996                         printk("\n" KERN_ERR PFX
1997                                "%s: Consistent memory allocation failed.\n",
1998                                name);
1999                 return -ENOMEM;
2000         }
2001
2002         lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
2003                                            sizeof(struct pcnet32_rx_head) *
2004                                            lp->rx_ring_size,
2005                                            &lp->rx_ring_dma_addr);
2006         if (lp->rx_ring == NULL) {
2007                 if (netif_msg_drv(lp))
2008                         printk("\n" KERN_ERR PFX
2009                                "%s: Consistent memory allocation failed.\n",
2010                                name);
2011                 return -ENOMEM;
2012         }
2013
2014         lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
2015                                   GFP_ATOMIC);
2016         if (!lp->tx_dma_addr) {
2017                 if (netif_msg_drv(lp))
2018                         printk("\n" KERN_ERR PFX
2019                                "%s: Memory allocation failed.\n", name);
2020                 return -ENOMEM;
2021         }
2022
2023         lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
2024                                   GFP_ATOMIC);
2025         if (!lp->rx_dma_addr) {
2026                 if (netif_msg_drv(lp))
2027                         printk("\n" KERN_ERR PFX
2028                                "%s: Memory allocation failed.\n", name);
2029                 return -ENOMEM;
2030         }
2031
2032         lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
2033                                 GFP_ATOMIC);
2034         if (!lp->tx_skbuff) {
2035                 if (netif_msg_drv(lp))
2036                         printk("\n" KERN_ERR PFX
2037                                "%s: Memory allocation failed.\n", name);
2038                 return -ENOMEM;
2039         }
2040
2041         lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
2042                                 GFP_ATOMIC);
2043         if (!lp->rx_skbuff) {
2044                 if (netif_msg_drv(lp))
2045                         printk("\n" KERN_ERR PFX
2046                                "%s: Memory allocation failed.\n", name);
2047                 return -ENOMEM;
2048         }
2049
2050         return 0;
2051 }
2052
2053 static void pcnet32_free_ring(struct net_device *dev)
2054 {
2055         struct pcnet32_private *lp = netdev_priv(dev);
2056
2057         kfree(lp->tx_skbuff);
2058         lp->tx_skbuff = NULL;
2059
2060         kfree(lp->rx_skbuff);
2061         lp->rx_skbuff = NULL;
2062
2063         kfree(lp->tx_dma_addr);
2064         lp->tx_dma_addr = NULL;
2065
2066         kfree(lp->rx_dma_addr);
2067         lp->rx_dma_addr = NULL;
2068
2069         if (lp->tx_ring) {
2070                 pci_free_consistent(lp->pci_dev,
2071                                     sizeof(struct pcnet32_tx_head) *
2072                                     lp->tx_ring_size, lp->tx_ring,
2073                                     lp->tx_ring_dma_addr);
2074                 lp->tx_ring = NULL;
2075         }
2076
2077         if (lp->rx_ring) {
2078                 pci_free_consistent(lp->pci_dev,
2079                                     sizeof(struct pcnet32_rx_head) *
2080                                     lp->rx_ring_size, lp->rx_ring,
2081                                     lp->rx_ring_dma_addr);
2082                 lp->rx_ring = NULL;
2083         }
2084 }
2085
2086 static int pcnet32_open(struct net_device *dev)
2087 {
2088         struct pcnet32_private *lp = netdev_priv(dev);
2089         unsigned long ioaddr = dev->base_addr;
2090         u16 val;
2091         int i;
2092         int rc;
2093         unsigned long flags;
2094
2095         if (request_irq(dev->irq, &pcnet32_interrupt,
2096                         lp->shared_irq ? IRQF_SHARED : 0, dev->name,
2097                         (void *)dev)) {
2098                 return -EAGAIN;
2099         }
2100
2101         spin_lock_irqsave(&lp->lock, flags);
2102         /* Check for a valid station address */
2103         if (!is_valid_ether_addr(dev->dev_addr)) {
2104                 rc = -EINVAL;
2105                 goto err_free_irq;
2106         }
2107
2108         /* Reset the PCNET32 */
2109         lp->a.reset(ioaddr);
2110
2111         /* switch pcnet32 to 32bit mode */
2112         lp->a.write_bcr(ioaddr, 20, 2);
2113
2114         if (netif_msg_ifup(lp))
2115                 printk(KERN_DEBUG
2116                        "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
2117                        dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
2118                        (u32) (lp->rx_ring_dma_addr),
2119                        (u32) (lp->init_dma_addr));
2120
2121         /* set/reset autoselect bit */
2122         val = lp->a.read_bcr(ioaddr, 2) & ~2;
2123         if (lp->options & PCNET32_PORT_ASEL)
2124                 val |= 2;
2125         lp->a.write_bcr(ioaddr, 2, val);
2126
2127         /* handle full duplex setting */
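        /* BCR9 bit 0 enables full duplex on the selected port; bit 1
         * additionally selects full-duplex operation on the AUI port */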
2128         if (lp->mii_if.full_duplex) {
2129                 val = lp->a.read_bcr(ioaddr, 9) & ~3;
2130                 if (lp->options & PCNET32_PORT_FD) {
2131                         val |= 1;
2132                         if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
2133                                 val |= 2;
2134                 } else if (lp->options & PCNET32_PORT_ASEL) {
2135                         /* workaround for the xSeries 250, turn on for 79C975 only */
2136                         if (lp->chip_version == 0x2627)
2137                                 val |= 3;
2138                 }
2139                 lp->a.write_bcr(ioaddr, 9, val);
2140         }
2141
2142         /* set/reset GPSI bit in test register */
2143         val = lp->a.read_csr(ioaddr, 124) & ~0x10;
2144         if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
2145                 val |= 0x10;
2146         lp->a.write_csr(ioaddr, 124, val);
2147
2148         /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
2149         if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
2150             (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
2151              lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
2152                 if (lp->options & PCNET32_PORT_ASEL) {
2153                         lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
2154                         if (netif_msg_link(lp))
2155                                 printk(KERN_DEBUG
2156                                        "%s: Setting 100Mb-Full Duplex.\n",
2157                                        dev->name);
2158                 }
2159         }
2160         if (lp->phycount < 2) {
2161                 /*
2162                  * 24 Jun 2004: according to AMD, in order to change the PHY,
2163                  * DANAS (or DISPM for 79C976) must be set; then select the speed,
2164                  * duplex, and/or enable auto negotiation, and clear DANAS
2165                  */
2166                 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
2167                         lp->a.write_bcr(ioaddr, 32,
2168                                         lp->a.read_bcr(ioaddr, 32) | 0x0080);
2169                         /* disable Auto Negotiation, set 10Mbps, HD */
2170                         val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
2171                         if (lp->options & PCNET32_PORT_FD)
2172                                 val |= 0x10;
2173                         if (lp->options & PCNET32_PORT_100)
2174                                 val |= 0x08;
2175                         lp->a.write_bcr(ioaddr, 32, val);
2176                 } else {
2177                         if (lp->options & PCNET32_PORT_ASEL) {
2178                                 lp->a.write_bcr(ioaddr, 32,
2179                                                 lp->a.read_bcr(ioaddr,
2180                                                                32) | 0x0080);
2181                                 /* enable auto negotiate, setup, disable fd */
2182                                 val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
2183                                 val |= 0x20;
2184                                 lp->a.write_bcr(ioaddr, 32, val);
2185                         }
2186                 }
2187         } else {
2188                 int first_phy = -1;
2189                 u16 bmcr;
2190                 u32 bcr9;
2191                 struct ethtool_cmd ecmd;
2192
2193                 /*
2194                  * There is really no good way to handle multiple PHYs
2195                  * other than to turn off all of the automatics
2196                  */
2197                 val = lp->a.read_bcr(ioaddr, 2);
2198                 lp->a.write_bcr(ioaddr, 2, val & ~2);
2199                 val = lp->a.read_bcr(ioaddr, 32);
2200                 lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7));   /* stop MII manager */
2201
2202                 if (!(lp->options & PCNET32_PORT_ASEL)) {
2203                         /* setup ecmd */
2204                         ecmd.port = PORT_MII;
2205                         ecmd.transceiver = XCVR_INTERNAL;
2206                         ecmd.autoneg = AUTONEG_DISABLE;
2207                         ecmd.speed =
2208                             (lp->options & PCNET32_PORT_100) ?
2209                             SPEED_100 : SPEED_10;
2210                         bcr9 = lp->a.read_bcr(ioaddr, 9);
2211
2212                         if (lp->options & PCNET32_PORT_FD) {
2213                                 ecmd.duplex = DUPLEX_FULL;
2214                                 bcr9 |= (1 << 0);
2215                         } else {
2216                                 ecmd.duplex = DUPLEX_HALF;
2217                                 bcr9 &= ~(1 << 0);      /* half duplex: clear the full-duplex enable bit */
2218                         }
2219                         lp->a.write_bcr(ioaddr, 9, bcr9);
2220                 }
2221
2222                 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2223                         if (lp->phymask & (1 << i)) {
2224                                 /* isolate all but the first PHY */
2225                                 bmcr = mdio_read(dev, i, MII_BMCR);
2226                                 if (first_phy == -1) {
2227                                         first_phy = i;
2228                                         mdio_write(dev, i, MII_BMCR,
2229                                                    bmcr & ~BMCR_ISOLATE);
2230                                 } else {
2231                                         mdio_write(dev, i, MII_BMCR,
2232                                                    bmcr | BMCR_ISOLATE);
2233                                 }
2234                                 /* use mii_ethtool_sset to setup PHY */
2235                                 lp->mii_if.phy_id = i;
2236                                 ecmd.phy_address = i;
2237                                 if (lp->options & PCNET32_PORT_ASEL) {
2238                                         mii_ethtool_gset(&lp->mii_if, &ecmd);
2239                                         ecmd.autoneg = AUTONEG_ENABLE;
2240                                 }
2241                                 mii_ethtool_sset(&lp->mii_if, &ecmd);
2242                         }
2243                 }
2244                 lp->mii_if.phy_id = first_phy;
2245                 if (netif_msg_link(lp))
2246                         printk(KERN_INFO "%s: Using PHY number %d.\n",
2247                                dev->name, first_phy);
2248         }
2249
2250 #ifdef DO_DXSUFLO
2251         if (lp->dxsuflo) {      /* Disable transmit stop on underflow */
2252                 val = lp->a.read_csr(ioaddr, CSR3);
2253                 val |= 0x40;
2254                 lp->a.write_csr(ioaddr, CSR3, val);
2255         }
2256 #endif
2257
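        /* PORTSEL occupies bits 8:7 of the init block MODE word; the
         * promiscuous bit (15) is handled in pcnet32_set_multicast_list() */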
2258         lp->init_block->mode =
2259             cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
2260         pcnet32_load_multicast(dev);
2261
2262         if (pcnet32_init_ring(dev)) {
2263                 rc = -ENOMEM;
2264                 goto err_free_ring;
2265         }
2266
2267         napi_enable(&lp->napi);
2268
2269         /* Re-initialize the PCNET32, and start it when done. */
2270         lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
2271         lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
2272
2273         lp->a.write_csr(ioaddr, CSR4, 0x0915);  /* auto tx pad */
2274         lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
2275
2276         netif_start_queue(dev);
2277
2278         if (lp->chip_version >= PCNET32_79C970A) {
2279                 /* Print the link status and start the watchdog */
2280                 pcnet32_check_media(dev, 1);
2281                 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
2282         }
2283
2284         i = 0;
2285         while (i++ < 100)
2286                 if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
2287                         break;
2288         /*
2289          * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
2290          * reports that doing so triggers a bug in the '974.
2291          */
2292         lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
2293
2294         if (netif_msg_ifup(lp))
2295                 printk(KERN_DEBUG
2296                        "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
2297                        dev->name, i,
2298                        (u32) (lp->init_dma_addr),
2299                        lp->a.read_csr(ioaddr, CSR0));
2300
2301         spin_unlock_irqrestore(&lp->lock, flags);
2302
2303         return 0;               /* Always succeed */
2304
2305       err_free_ring:
2306         /* free any allocated skbuffs */
2307         pcnet32_purge_rx_ring(dev);
2308
2309         /*
2310          * Switch back to 16bit mode to avoid problems with dumb
2311          * DOS packet driver after a warm reboot
2312          */
2313         lp->a.write_bcr(ioaddr, 20, 4);
2314
2315       err_free_irq:
2316         spin_unlock_irqrestore(&lp->lock, flags);
2317         free_irq(dev->irq, dev);
2318         return rc;
2319 }
2320
2321 /*
2322  * The LANCE has been halted for one reason or another (busmaster memory
2323  * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
2324  * etc.).  Modern LANCE variants always reload their ring-buffer
2325  * configuration when restarted, so we must reinitialize our ring
2326  * context before restarting.  As part of this reinitialization,
2327  * find all packets still on the Tx ring and pretend that they had been
2328  * sent (in effect, drop the packets on the floor) - the higher-level
2329  * protocols will time out and retransmit.  It'd be better to shuffle
2330  * these skbs to a temp list and then actually re-Tx them after
2331  * restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
2332  */
2333
2334 static void pcnet32_purge_tx_ring(struct net_device *dev)
2335 {
2336         struct pcnet32_private *lp = netdev_priv(dev);
2337         int i;
2338
2339         for (i = 0; i < lp->tx_ring_size; i++) {
2340                 lp->tx_ring[i].status = 0;      /* CPU owns buffer */
2341                 wmb();          /* Make sure adapter sees owner change */
2342                 if (lp->tx_skbuff[i]) {
2343                         pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
2344                                          lp->tx_skbuff[i]->len,
2345                                          PCI_DMA_TODEVICE);
2346                         dev_kfree_skb_any(lp->tx_skbuff[i]);
2347                 }
2348                 lp->tx_skbuff[i] = NULL;
2349                 lp->tx_dma_addr[i] = 0;
2350         }
2351 }
2352
2353 /* Initialize the PCNET32 Rx and Tx rings. */
2354 static int pcnet32_init_ring(struct net_device *dev)
2355 {
2356         struct pcnet32_private *lp = netdev_priv(dev);
2357         int i;
2358
2359         lp->tx_full = 0;
2360         lp->cur_rx = lp->cur_tx = 0;
2361         lp->dirty_rx = lp->dirty_tx = 0;
2362
2363         for (i = 0; i < lp->rx_ring_size; i++) {
2364                 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
2365                 if (rx_skbuff == NULL) {
2366                         rx_skbuff = lp->rx_skbuff[i] =
2367                             dev_alloc_skb(PKT_BUF_SKB);
2368                         if (!rx_skbuff) {
2369                                 /* there is not much we can do at this point */
2370                                 if (netif_msg_drv(lp))
2371                                         printk(KERN_ERR
2372                                                "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
2373                                                dev->name);
2374                                 return -1;
2375                         }
2376                         skb_reserve(rx_skbuff, NET_IP_ALIGN);
2377                 }
2378
2379                 rmb();
2380                 if (lp->rx_dma_addr[i] == 0)
2381                         lp->rx_dma_addr[i] =
2382                             pci_map_single(lp->pci_dev, rx_skbuff->data,
2383                                            PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
2384                 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
2385                 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
2386                 wmb();          /* Make sure owner changes after all others are visible */
2387                 lp->rx_ring[i].status = cpu_to_le16(0x8000);
2388         }
2389         /* The Tx buffer address is filled in as needed, but we do need to clear
2390          * the upper ownership bit. */
2391         for (i = 0; i < lp->tx_ring_size; i++) {
2392                 lp->tx_ring[i].status = 0;      /* CPU owns buffer */
2393                 wmb();          /* Make sure adapter sees owner change */
2394                 lp->tx_ring[i].base = 0;
2395                 lp->tx_dma_addr[i] = 0;
2396         }
2397
2398         lp->init_block->tlen_rlen =
2399             cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
2400         for (i = 0; i < 6; i++)
2401                 lp->init_block->phys_addr[i] = dev->dev_addr[i];
2402         lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
2403         lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
2404         wmb();                  /* Make sure all changes are visible */
2405         return 0;
2406 }
2407
2408 /* the pcnet32 has been issued a stop or reset.  Wait for the stop bit
2409  * then flush the pending transmit operations, re-initialize the ring,
2410  * and tell the chip to initialize.
2411  */
2412 static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
2413 {
2414         struct pcnet32_private *lp = netdev_priv(dev);
2415         unsigned long ioaddr = dev->base_addr;
2416         int i;
2417
2418         /* wait for stop */
2419         for (i = 0; i < 100; i++)
2420                 if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
2421                         break;
2422
2423         if (i >= 100 && netif_msg_drv(lp))
2424                 printk(KERN_ERR
2425                        "%s: pcnet32_restart timed out waiting for stop.\n",
2426                        dev->name);
2427
2428         pcnet32_purge_tx_ring(dev);
2429         if (pcnet32_init_ring(dev))
2430                 return;
2431
2432         /* ReInit Ring */
2433         lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
2434         i = 0;
2435         while (i++ < 1000)
2436                 if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
2437                         break;
2438
2439         lp->a.write_csr(ioaddr, CSR0, csr0_bits);
2440 }
2441
2442 static void pcnet32_tx_timeout(struct net_device *dev)
2443 {
2444         struct pcnet32_private *lp = netdev_priv(dev);
2445         unsigned long ioaddr = dev->base_addr, flags;
2446
2447         spin_lock_irqsave(&lp->lock, flags);
2448         /* Transmitter timeout, serious problems. */
2449         if (pcnet32_debug & NETIF_MSG_DRV)
2450                 printk(KERN_ERR
2451                        "%s: transmit timed out, status %4.4x, resetting.\n",
2452                        dev->name, lp->a.read_csr(ioaddr, CSR0));
2453         lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
2454         dev->stats.tx_errors++;
2455         if (netif_msg_tx_err(lp)) {
2456                 int i;
2457                 printk(KERN_DEBUG
2458                        " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
2459                        lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
2460                        lp->cur_rx);
2461                 for (i = 0; i < lp->rx_ring_size; i++)
2462                         printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
2463                                le32_to_cpu(lp->rx_ring[i].base),
2464                                (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
2465                                0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
2466                                le16_to_cpu(lp->rx_ring[i].status));
2467                 for (i = 0; i < lp->tx_ring_size; i++)
2468                         printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
2469                                le32_to_cpu(lp->tx_ring[i].base),
2470                                (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
2471                                le32_to_cpu(lp->tx_ring[i].misc),
2472                                le16_to_cpu(lp->tx_ring[i].status));
2473                 printk("\n");
2474         }
2475         pcnet32_restart(dev, CSR0_NORMAL);
2476
2477         dev->trans_start = jiffies;
2478         netif_wake_queue(dev);
2479
2480         spin_unlock_irqrestore(&lp->lock, flags);
2481 }
2482
2483 static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
2484 {
2485         struct pcnet32_private *lp = netdev_priv(dev);
2486         unsigned long ioaddr = dev->base_addr;
2487         u16 status;
2488         int entry;
2489         unsigned long flags;
2490
2491         spin_lock_irqsave(&lp->lock, flags);
2492
2493         if (netif_msg_tx_queued(lp)) {
2494                 printk(KERN_DEBUG
2495                        "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
2496                        dev->name, lp->a.read_csr(ioaddr, CSR0));
2497         }
2498
2499         /* Default status -- will not enable Successful-TxDone
2500          * interrupt when that option is available to us.
2501          */
2502         status = 0x8300;
2503
2504         /* Fill in a Tx ring entry */
2505
2506         /* Mask to ring buffer boundary. */
2507         entry = lp->cur_tx & lp->tx_mod_mask;
2508
2509         /* Caution: the write order is important here, set the status
2510          * with the "ownership" bits last. */
2511
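        /* descriptor byte counts are expressed as two's-complement negative
         * values, as required by the LANCE-style descriptor format */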
2512         lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
2513
2514         lp->tx_ring[entry].misc = 0x00000000;
2515
2516         lp->tx_skbuff[entry] = skb;
2517         lp->tx_dma_addr[entry] =
2518             pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2519         lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
2520         wmb();                  /* Make sure owner changes after all others are visible */
2521         lp->tx_ring[entry].status = cpu_to_le16(status);
2522
2523         lp->cur_tx++;
2524         dev->stats.tx_bytes += skb->len;
2525
2526         /* Trigger an immediate send poll. */
2527         lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
2528
2529         dev->trans_start = jiffies;
2530
2531         if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
2532                 lp->tx_full = 1;
2533                 netif_stop_queue(dev);
2534         }
2535         spin_unlock_irqrestore(&lp->lock, flags);
2536         return 0;
2537 }
2538
2539 /* The PCNET32 interrupt handler. */
2540 static irqreturn_t
2541 pcnet32_interrupt(int irq, void *dev_id)
2542 {
2543         struct net_device *dev = dev_id;
2544         struct pcnet32_private *lp;
2545         unsigned long ioaddr;
2546         u16 csr0;
2547         int boguscnt = max_interrupt_work;
2548
2549         ioaddr = dev->base_addr;
2550         lp = netdev_priv(dev);
2551
2552         spin_lock(&lp->lock);
2553
2554         csr0 = lp->a.read_csr(ioaddr, CSR0);
2555         while ((csr0 & 0x8f00) && --boguscnt >= 0) {
2556                 if (csr0 == 0xffff) {
2557                         break;  /* PCMCIA remove happened */
2558                 }
2559                 /* Acknowledge all of the current interrupt sources ASAP. */
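                /* CSR0 event bits are cleared by writing 1 back to them; mask
                 * off the command and IENA bits (0x004f) so this write only
                 * acknowledges events and does not restart or stop the chip. */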
2560                 lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
2561
2562                 if (netif_msg_intr(lp))
2563                         printk(KERN_DEBUG
2564                                "%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
2565                                dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
2566
2567                 /* Log misc errors. */
2568                 if (csr0 & 0x4000)
2569                         dev->stats.tx_errors++; /* Tx babble. */
2570                 if (csr0 & 0x1000) {
2571                         /*
2572                          * This happens when our receive ring is full. This
2573                          * shouldn't be a problem as we will see normal rx
2574                          * interrupts for the frames in the receive ring.  But
2575                          * there are some PCI chipsets (I can reproduce this
2576                          * on SP3G with Intel saturn chipset) which sometimes
2577                          * have problems and will fill up the receive
2578                          * ring with error descriptors.  In this situation we
2579                          * don't get a rx interrupt, but a missed frame
2580                          * interrupt sooner or later.
2581                          */
2582                         dev->stats.rx_errors++; /* Missed a Rx frame. */
2583                 }
2584                 if (csr0 & 0x0800) {
2585                         if (netif_msg_drv(lp))
2586                                 printk(KERN_ERR
2587                                        "%s: Bus master arbitration failure, status %4.4x.\n",
2588                                        dev->name, csr0);
2589                         /* unlike for the lance, there is no restart needed */
2590                 }
2591                 if (netif_rx_schedule_prep(dev, &lp->napi)) {
2592                         u16 val;
2593                         /* set interrupt masks */
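                        /* setting the mask bits in CSR3 keeps further chip
                         * interrupts quiet while pcnet32_poll() runs; the poll
                         * routine clears them again before re-enabling IENA */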
2594                         val = lp->a.read_csr(ioaddr, CSR3);
2595                         val |= 0x5f00;
2596                         lp->a.write_csr(ioaddr, CSR3, val);
2597                         mmiowb();
2598                         __netif_rx_schedule(dev, &lp->napi);
2599                         break;
2600                 }
2601                 csr0 = lp->a.read_csr(ioaddr, CSR0);
2602         }
2603
2604         if (netif_msg_intr(lp))
2605                 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
2606                        dev->name, lp->a.read_csr(ioaddr, CSR0));
2607
2608         spin_unlock(&lp->lock);
2609
2610         return IRQ_HANDLED;
2611 }
2612
2613 static int pcnet32_close(struct net_device *dev)
2614 {
2615         unsigned long ioaddr = dev->base_addr;
2616         struct pcnet32_private *lp = netdev_priv(dev);
2617         unsigned long flags;
2618
2619         del_timer_sync(&lp->watchdog_timer);
2620
2621         netif_stop_queue(dev);
2622         napi_disable(&lp->napi);
2623
2624         spin_lock_irqsave(&lp->lock, flags);
2625
2626         dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2627
2628         if (netif_msg_ifdown(lp))
2629                 printk(KERN_DEBUG
2630                        "%s: Shutting down ethercard, status was %2.2x.\n",
2631                        dev->name, lp->a.read_csr(ioaddr, CSR0));
2632
2633         /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
2634         lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
2635
2636         /*
2637          * Switch back to 16bit mode to avoid problems with dumb
2638          * DOS packet driver after a warm reboot
2639          */
2640         lp->a.write_bcr(ioaddr, 20, 4);
2641
2642         spin_unlock_irqrestore(&lp->lock, flags);
2643
2644         free_irq(dev->irq, dev);
2645
2646         spin_lock_irqsave(&lp->lock, flags);
2647
2648         pcnet32_purge_rx_ring(dev);
2649         pcnet32_purge_tx_ring(dev);
2650
2651         spin_unlock_irqrestore(&lp->lock, flags);
2652
2653         return 0;
2654 }
2655
2656 static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
2657 {
2658         struct pcnet32_private *lp = netdev_priv(dev);
2659         unsigned long ioaddr = dev->base_addr;
2660         unsigned long flags;
2661
2662         spin_lock_irqsave(&lp->lock, flags);
2663         dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2664         spin_unlock_irqrestore(&lp->lock, flags);
2665
2666         return &dev->stats;
2667 }
2668
2669 /* taken from the sunlance driver, which it took from the depca driver */
2670 static void pcnet32_load_multicast(struct net_device *dev)
2671 {
2672         struct pcnet32_private *lp = netdev_priv(dev);
2673         volatile struct pcnet32_init_block *ib = lp->init_block;
2674         volatile __le16 *mcast_table = (__le16 *)ib->filter;
2675         struct dev_mc_list *dmi = dev->mc_list;
2676         unsigned long ioaddr = dev->base_addr;
2677         char *addrs;
2678         int i;
2679         u32 crc;
2680
2681         /* set all multicast bits */
2682         if (dev->flags & IFF_ALLMULTI) {
2683                 ib->filter[0] = cpu_to_le32(~0U);
2684                 ib->filter[1] = cpu_to_le32(~0U);
2685                 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
2686                 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
2687                 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
2688                 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
2689                 return;
2690         }
2691         /* clear the multicast filter */
2692         ib->filter[0] = 0;
2693         ib->filter[1] = 0;
2694
2695         /* Add addresses */
2696         for (i = 0; i < dev->mc_count; i++) {
2697                 addrs = dmi->dmi_addr;
2698                 dmi = dmi->next;
2699
2700                 /* multicast address? */
2701                 if (!(*addrs & 1))
2702                         continue;
2703
2704                 crc = ether_crc_le(6, addrs);
2705                 crc = crc >> 26;
2706                 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
2707         }
2708         for (i = 0; i < 4; i++)
2709                 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
2710                                 le16_to_cpu(mcast_table[i]));
2711         return;
2712 }
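
/*
 * Multicast hash sketch: ether_crc_le() yields the little-endian CRC-32 of
 * the 6-byte address and its top six bits pick one of the 64 logical
 * address filter bits, mirrored both in ib->filter[] and in the four 16-bit
 * CSRs starting at PCNET32_MC_FILTER.  In other words:
 *
 *      crc  = ether_crc_le(6, addrs) >> 26;    // 6-bit hash, 0..63
 *      word = crc >> 4;                        // which 16-bit filter word
 *      bit  = 1 << (crc & 0xf);                // bit within that word
 */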
2713
2714 /*
2715  * Set or clear the multicast filter for this adaptor.
2716  */
2717 static void pcnet32_set_multicast_list(struct net_device *dev)
2718 {
2719         unsigned long ioaddr = dev->base_addr, flags;
2720         struct pcnet32_private *lp = netdev_priv(dev);
2721         int csr15, suspended;
2722
2723         spin_lock_irqsave(&lp->lock, flags);
2724         suspended = pcnet32_suspend(dev, &flags, 0);
2725         csr15 = lp->a.read_csr(ioaddr, CSR15);
2726         if (dev->flags & IFF_PROMISC) {
2727                 /* Log any net taps. */
2728                 if (netif_msg_hw(lp))
2729                         printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
2730                                dev->name);
2731                 lp->init_block->mode =
2732                     cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
2733                                 7);
2734                 lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
2735         } else {
2736                 lp->init_block->mode =
2737                     cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
2738                 lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
2739                 pcnet32_load_multicast(dev);
2740         }
2741
2742         if (suspended) {
2743                 int csr5;
2744                 /* clear SUSPEND (SPND) - CSR5 bit 0 */
2745                 csr5 = lp->a.read_csr(ioaddr, CSR5);
2746                 lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
2747         } else {
2748                 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
2749                 pcnet32_restart(dev, CSR0_NORMAL);
2750                 netif_wake_queue(dev);
2751         }
2752
2753         spin_unlock_irqrestore(&lp->lock, flags);
2754 }
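
/*
 * pcnet32_set_multicast_list() prefers to suspend the chip (SPND, CSR5
 * bit 0) via pcnet32_suspend() so the mode word and filter can be rewritten
 * without disturbing ring state; if the suspend did not take effect it
 * falls back to a full CSR0_STOP / pcnet32_restart() cycle and re-wakes the
 * transmit queue.
 */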
2755
2756 /* This routine assumes that the lp->lock is held */
2757 static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
2758 {
2759         struct pcnet32_private *lp = netdev_priv(dev);
2760         unsigned long ioaddr = dev->base_addr;
2761         u16 val_out;
2762
2763         if (!lp->mii)
2764                 return 0;
2765
2766         lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2767         val_out = lp->a.read_bcr(ioaddr, 34);
2768
2769         return val_out;
2770 }
2771
2772 /* This routine assumes that the lp->lock is held */
2773 static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
2774 {
2775         struct pcnet32_private *lp = netdev_priv(dev);
2776         unsigned long ioaddr = dev->base_addr;
2777
2778         if (!lp->mii)
2779                 return;
2780
2781         lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2782         lp->a.write_bcr(ioaddr, 34, val);
2783 }
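
/*
 * The MII management interface is reached through two bus control
 * registers: BCR33 selects the PHY (bits 9:5) and register (bits 4:0),
 * BCR34 is the data port.  Illustrative use, with lp->lock held as both
 * helpers require:
 *
 *      u16 bmsr = mdio_read(dev, lp->mii_if.phy_id, MII_BMSR);
 *      if (bmsr & BMSR_LSTATUS)
 *              ;       // active PHY reports link up
 */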
2784
2785 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2786 {
2787         struct pcnet32_private *lp = netdev_priv(dev);
2788         int rc;
2789         unsigned long flags;
2790
2791         /* SIOC[GS]MIIxxx ioctls */
2792         if (lp->mii) {
2793                 spin_lock_irqsave(&lp->lock, flags);
2794                 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
2795                 spin_unlock_irqrestore(&lp->lock, flags);
2796         } else {
2797                 rc = -EOPNOTSUPP;
2798         }
2799
2800         return rc;
2801 }
2802
2803 static int pcnet32_check_otherphy(struct net_device *dev)
2804 {
2805         struct pcnet32_private *lp = netdev_priv(dev);
2806         struct mii_if_info mii = lp->mii_if;
2807         u16 bmcr;
2808         int i;
2809
2810         for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2811                 if (i == lp->mii_if.phy_id)
2812                         continue;       /* skip active phy */
2813                 if (lp->phymask & (1 << i)) {
2814                         mii.phy_id = i;
2815                         if (mii_link_ok(&mii)) {
2816                                 /* found PHY with active link */
2817                                 if (netif_msg_link(lp))
2818                                         printk(KERN_INFO
2819                                                "%s: Using PHY number %d.\n",
2820                                                dev->name, i);
2821
2822                                 /* isolate inactive phy */
2823                                 bmcr = mdio_read(dev, lp->mii_if.phy_id,
2824                                                  MII_BMCR);
2825                                 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
2826                                            bmcr | BMCR_ISOLATE);
2827
2828                                 /* de-isolate new phy */
2829                                 bmcr = mdio_read(dev, i, MII_BMCR);
2830                                 mdio_write(dev, i, MII_BMCR,
2831                                            bmcr & ~BMCR_ISOLATE);
2832
2833                                 /* set new phy address */
2834                                 lp->mii_if.phy_id = i;
2835                                 return 1;
2836                         }
2837                 }
2838         }
2839         return 0;
2840 }
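
/*
 * When the active PHY loses link, pcnet32_check_otherphy() scans the other
 * PHYs recorded in lp->phymask, parks the old one with BMCR_ISOLATE,
 * de-isolates the first one that reports link, and switches
 * lp->mii_if.phy_id over to it.
 */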
2841
2842 /*
2843  * Show the status of the media.  Similar to mii_check_media, but it
2844  * correctly shows the link speed for all (tested) pcnet32 variants.
2845  * Devices with no mii just report link state without speed.
2846  *
2847  * Caller is assumed to hold and release the lp->lock.
2848  */
2849
2850 static void pcnet32_check_media(struct net_device *dev, int verbose)
2851 {
2852         struct pcnet32_private *lp = netdev_priv(dev);
2853         int curr_link;
2854         int prev_link = netif_carrier_ok(dev) ? 1 : 0;
2855         u32 bcr9;
2856
2857         if (lp->mii) {
2858                 curr_link = mii_link_ok(&lp->mii_if);
2859         } else {
2860                 ulong ioaddr = dev->base_addr;  /* card base I/O address */
2861                 curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
2862         }
2863         if (!curr_link) {
2864                 if (prev_link || verbose) {
2865                         netif_carrier_off(dev);
2866                         if (netif_msg_link(lp))
2867                                 printk(KERN_INFO "%s: link down\n", dev->name);
2868                 }
2869                 if (lp->phycount > 1) {
2870                         curr_link = pcnet32_check_otherphy(dev);
2871                         prev_link = 0;
2872                 }
2873         } else if (verbose || !prev_link) {
2874                 netif_carrier_on(dev);
2875                 if (lp->mii) {
2876                         if (netif_msg_link(lp)) {
2877                                 struct ethtool_cmd ecmd;
2878                                 mii_ethtool_gset(&lp->mii_if, &ecmd);
2879                                 printk(KERN_INFO
2880                                        "%s: link up, %sMbps, %s-duplex\n",
2881                                        dev->name,
2882                                        (ecmd.speed == SPEED_100) ? "100" : "10",
2883                                        (ecmd.duplex ==
2884                                         DUPLEX_FULL) ? "full" : "half");
2885                         }
2886                         bcr9 = lp->a.read_bcr(dev->base_addr, 9);
2887                         if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
2888                                 if (lp->mii_if.full_duplex)
2889                                         bcr9 |= (1 << 0);
2890                                 else
2891                                         bcr9 &= ~(1 << 0);
2892                                 lp->a.write_bcr(dev->base_addr, 9, bcr9);
2893                         }
2894                 } else {
2895                         if (netif_msg_link(lp))
2896                                 printk(KERN_INFO "%s: link up\n", dev->name);
2897                 }
2898         }
2899 }
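
/*
 * When an MII link comes up, the duplex recorded in lp->mii_if.full_duplex
 * is copied into BCR9 bit 0 (full-duplex enable) so MAC and PHY stay in
 * agreement; parts without an MII infer link purely from BCR4 and report
 * no speed.
 */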
2900
2901 /*
2902  * Check for loss of link and link establishment.
2903  * Can not use mii_check_media because it does nothing if mode is forced.
2904  */
2905
2906 static void pcnet32_watchdog(struct net_device *dev)
2907 {
2908         struct pcnet32_private *lp = netdev_priv(dev);
2909         unsigned long flags;
2910
2911         /* Print the link status if it has changed */
2912         spin_lock_irqsave(&lp->lock, flags);
2913         pcnet32_check_media(dev, 0);
2914         spin_unlock_irqrestore(&lp->lock, flags);
2915
2916         mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
2917 }
2918
2919 static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
2920 {
2921         struct net_device *dev = pci_get_drvdata(pdev);
2922
2923         if (netif_running(dev)) {
2924                 netif_device_detach(dev);
2925                 pcnet32_close(dev);
2926         }
2927         pci_save_state(pdev);
2928         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2929         return 0;
2930 }
2931
2932 static int pcnet32_pm_resume(struct pci_dev *pdev)
2933 {
2934         struct net_device *dev = pci_get_drvdata(pdev);
2935
2936         pci_set_power_state(pdev, PCI_D0);
2937         pci_restore_state(pdev);
2938
2939         if (netif_running(dev)) {
2940                 pcnet32_open(dev);
2941                 netif_device_attach(dev);
2942         }
2943         return 0;
2944 }
2945
2946 static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
2947 {
2948         struct net_device *dev = pci_get_drvdata(pdev);
2949
2950         if (dev) {
2951                 struct pcnet32_private *lp = netdev_priv(dev);
2952
2953                 unregister_netdev(dev);
2954                 pcnet32_free_ring(dev);
2955                 release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
2956                 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
2957                                     lp->init_block, lp->init_dma_addr);
2958                 free_netdev(dev);
2959                 pci_disable_device(pdev);
2960                 pci_set_drvdata(pdev, NULL);
2961         }
2962 }
2963
2964 static struct pci_driver pcnet32_driver = {
2965         .name = DRV_NAME,
2966         .probe = pcnet32_probe_pci,
2967         .remove = __devexit_p(pcnet32_remove_one),
2968         .id_table = pcnet32_pci_tbl,
2969         .suspend = pcnet32_pm_suspend,
2970         .resume = pcnet32_pm_resume,
2971 };
2972
2973 /* Additional parameters that may be passed in... */
2974 static int debug = -1;
2975 static int tx_start_pt = -1;
2976 static int pcnet32_have_pci;
2977
2978 module_param(debug, int, 0);
2979 MODULE_PARM_DESC(debug, DRV_NAME " debug level");
2980 module_param(max_interrupt_work, int, 0);
2981 MODULE_PARM_DESC(max_interrupt_work,
2982                  DRV_NAME " maximum events handled per interrupt");
2983 module_param(rx_copybreak, int, 0);
2984 MODULE_PARM_DESC(rx_copybreak,
2985                  DRV_NAME " copy breakpoint for copy-only-tiny-frames");
2986 module_param(tx_start_pt, int, 0);
2987 MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
2988 module_param(pcnet32vlb, int, 0);
2989 MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
2990 module_param_array(options, int, NULL, 0);
2991 MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
2992 module_param_array(full_duplex, int, NULL, 0);
2993 MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1 to force full duplex)");
2994 /* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
2995 module_param_array(homepna, int, NULL, 0);
2996 MODULE_PARM_DESC(homepna,
2997                  DRV_NAME
2998                  " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)");
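
/*
 * Example (illustrative): a two-card setup with the first 79C978 switched
 * into HomePNA mode and verbose messages enabled:
 *
 *      modprobe pcnet32 homepna=1,0 debug=7
 *
 * The array parameters (options, full_duplex, homepna) take one comma
 * separated value per card, applied in probe order.
 */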
2999
3000 MODULE_AUTHOR("Thomas Bogendoerfer");
3001 MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
3002 MODULE_LICENSE("GPL");
3003
3004 #define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
3005
3006 static int __init pcnet32_init_module(void)
3007 {
3008         printk(KERN_INFO "%s", version);
3009
3010         pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
3011
3012         if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
3013                 tx_start = tx_start_pt;
3014
3015         /* find the PCI devices */
3016         if (!pci_register_driver(&pcnet32_driver))
3017                 pcnet32_have_pci = 1;
3018
3019         /* should we find any remaining VLB devices? */
3020         if (pcnet32vlb)
3021                 pcnet32_probe_vlbus(pcnet32_portlist);
3022
3023         if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
3024                 printk(KERN_INFO PFX "%d cards found.\n", cards_found);
3025
3026         return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
3027 }
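
/*
 * pcnet32_init_module() succeeds when either the PCI driver registered
 * cleanly or the VLB port probe turned up at least one card, so the module
 * loads on systems that have only one of the two bus types.
 */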
3028
3029 static void __exit pcnet32_cleanup_module(void)
3030 {
3031         struct net_device *next_dev;
3032
3033         while (pcnet32_dev) {
3034                 struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
3035                 next_dev = lp->next;
3036                 unregister_netdev(pcnet32_dev);
3037                 pcnet32_free_ring(pcnet32_dev);
3038                 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
3039                 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
3040                                     lp->init_block, lp->init_dma_addr);
3041                 free_netdev(pcnet32_dev);
3042                 pcnet32_dev = next_dev;
3043         }
3044
3045         if (pcnet32_have_pci)
3046                 pci_unregister_driver(&pcnet32_driver);
3047 }
3048
3049 module_init(pcnet32_init_module);
3050 module_exit(pcnet32_cleanup_module);
3051
3052 /*
3053  * Local variables:
3054  *  c-indent-level: 4
3055  *  tab-width: 8
3056  * End:
3057  */