[PATCH] forcedeth: Poll for link changes
drivers/net/forcedeth.c
/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 *      engineered documentation written by Carl-Daniel Hailfinger
 *      and Andrew de Quincey. It's neither supported nor endorsed
 *      by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *              IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Changelog:
 *      0.01: 05 Oct 2003: First release that compiles without warnings.
 *      0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *                         Check all PCI BARs for the register window.
 *                         udelay added to mii_rw.
 *      0.03: 06 Oct 2003: Initialize dev->irq.
 *      0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 *      0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 *      0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *                         irq mask updated
 *      0.07: 14 Oct 2003: Further irq mask updates.
 *      0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *                         added into irq handler, NULL check for drain_ring.
 *      0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *                         requested interrupt sources.
 *      0.10: 20 Oct 2003: First cleanup for release.
 *      0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *                         MAC Address init fix, set_multicast cleanup.
 *      0.12: 23 Oct 2003: Cleanups for release.
 *      0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *                         Set link speed correctly. start rx before starting
 *                         tx (nv_start_rx sets the link speed).
 *      0.14: 25 Oct 2003: Nic dependent irq mask.
 *      0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *                         open.
 *      0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *                         increased to 1628 bytes.
 *      0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *                         the tx length.
 *      0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 *      0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *                         addresses, really stop rx if already running
 *                         in nv_start_rx, clean up a bit.
 *      0.20: 07 Dec 2003: alloc fixes
 *      0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 *      0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *                         on close.
 *      0.23: 26 Jan 2004: various small cleanups
 *      0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 *      0.25: 09 Mar 2004: wol support
 *      0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 *      0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *                         added CK804/MCP04 device IDs, code fixes
 *                         for registers, link status and other minor fixes.
 *      0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 *      0.29: 31 Aug 2004: Add backup timer for link change notification.
 *      0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *                         into nv_close, otherwise reenabling for wol can
 *                         cause DMA to kfree'd memory.
 *      0.31: 14 Nov 2004: ethtool support for getting/setting link
 *                         capabilities.
 *      0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *      0.33: 16 May 2005: Support for MCP51 added.
 *      0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION               "0.34"
#define DRV_NAME                        "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk                 printk
#else
#define dprintk(x...)           do { } while (0)
#endif


/*
 * Hardware access:
 */

#define DEV_NEED_LASTPACKET1    0x0001  /* set LASTPACKET1 in tx flags */
#define DEV_IRQMASK_1           0x0002  /* use NVREG_IRQMASK_WANTED_1 for irq mask */
#define DEV_IRQMASK_2           0x0004  /* use NVREG_IRQMASK_WANTED_2 for irq mask */
#define DEV_NEED_TIMERIRQ       0x0008  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER      0x0010  /* poll link settings. Relies on the timer irq */

enum {
        NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT  0x040
#define NVREG_IRQSTAT_MASK              0x1ff
        NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR              0x0001
#define NVREG_IRQ_RX                    0x0002
#define NVREG_IRQ_RX_NOBUF              0x0004
#define NVREG_IRQ_TX_ERR                0x0008
#define NVREG_IRQ_TX2                   0x0010
#define NVREG_IRQ_TIMER                 0x0020
#define NVREG_IRQ_LINK                  0x0040
#define NVREG_IRQ_TX1                   0x0100
#define NVREG_IRQMASK_WANTED_1          0x005f
#define NVREG_IRQMASK_WANTED_2          0x0147
#define NVREG_IRQ_UNKNOWN               (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1))

        NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL             3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
        NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT      970
        NvRegMisc1 = 0x080,
#define NVREG_MISC1_HD          0x02
#define NVREG_MISC1_FORCE       0x3b0f3c

        NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START     0x01
        NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY     0x01

        NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_ALWAYS        0x7F0008
#define NVREG_PFF_PROMISC       0x80
#define NVREG_PFF_MYADDR        0x20

        NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY   0x601
#define NVREG_OFFLOAD_NORMAL    RX_NIC_BUFSIZE
        NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START      0x01
        NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY      0x01

        NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK      0x00ff
#define NVREG_RNDSEED_FORCE     0x7f00
#define NVREG_RNDSEED_FORCE2    0x2d00
#define NVREG_RNDSEED_FORCE3    0x7400

        NvRegUnknownSetupReg1 = 0xA0,
#define NVREG_UNKSETUP1_VAL     0x16070f
        NvRegUnknownSetupReg2 = 0xA4,
#define NVREG_UNKSETUP2_VAL     0x16
        NvRegMacAddrA = 0xA8,
        NvRegMacAddrB = 0xAC,
        NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE  0x01
        NvRegMulticastAddrB = 0xB4,
        NvRegMulticastMaskA = 0xB8,
        NvRegMulticastMaskB = 0xBC,

        NvRegPhyInterface = 0xC0,
#define PHY_RGMII               0x10000000

        NvRegTxRingPhysAddr = 0x100,
        NvRegRxRingPhysAddr = 0x104,
        NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
        NvRegUnknownTransmitterReg = 0x10c,
        NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10      1000
#define NVREG_LINKSPEED_100     100
#define NVREG_LINKSPEED_1000    50
#define NVREG_LINKSPEED_MASK    (0xFFF)
        NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31   (1<<31)
        NvRegUnknownSetupReg3 = 0x13c,
#define NVREG_UNKSETUP3_VAL1    0x200010
        NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK      0x0001
#define NVREG_TXRXCTL_BIT1      0x0002
#define NVREG_TXRXCTL_BIT2      0x0004
#define NVREG_TXRXCTL_IDLE      0x0008
#define NVREG_TXRXCTL_RESET     0x0010
#define NVREG_TXRXCTL_RXCHECK   0x0400
        NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR             0x0001
#define NVREG_MIISTAT_LINKCHANGE        0x0008
#define NVREG_MIISTAT_MASK              0x000f
#define NVREG_MIISTAT_MASK2             0x000f
        NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL     8

        NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START    0x02
#define NVREG_ADAPTCTL_LINKUP   0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING  0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
        NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8     (1<<8)
#define NVREG_MIIDELAY  5
        NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE      0x08000
#define NVREG_MIICTL_WRITE      0x00400
#define NVREG_MIICTL_ADDRSHIFT  5
        NvRegMIIData = 0x194,
        NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL           0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT     24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT   16
#define NVREG_WAKEUPFLAGS_D3SHIFT       12
#define NVREG_WAKEUPFLAGS_D2SHIFT       8
#define NVREG_WAKEUPFLAGS_D1SHIFT       4
#define NVREG_WAKEUPFLAGS_D0SHIFT       0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT         0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT      0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE     0x04
#define NVREG_WAKEUPFLAGS_ENABLE        0x1111

        NvRegPatternCRC = 0x204,
        NvRegPatternMask = 0x208,
        NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP   (1<<30)
#define NVREG_POWERCAP_D2SUPP   (1<<26)
#define NVREG_POWERCAP_D1SUPP   (1<<25)
        NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP      0x8000
#define NVREG_POWERSTATE_VALID          0x0100
#define NVREG_POWERSTATE_MASK           0x0003
#define NVREG_POWERSTATE_D0             0x0000
#define NVREG_POWERSTATE_D1             0x0001
#define NVREG_POWERSTATE_D2             0x0002
#define NVREG_POWERSTATE_D3             0x0003
};

/* Big endian: should work, but is untested */
struct ring_desc {
        u32 PacketBuffer;
        u32 FlagLen;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET        (1<<16)
#define NV_TX_RETRYERROR        (1<<19)
#define NV_TX_LASTPACKET1       (1<<24)
#define NV_TX_DEFERRED          (1<<26)
#define NV_TX_CARRIERLOST       (1<<27)
#define NV_TX_LATECOLLISION     (1<<28)
#define NV_TX_UNDERFLOW         (1<<29)
#define NV_TX_ERROR             (1<<30)
#define NV_TX_VALID             (1<<31)

#define NV_TX2_LASTPACKET       (1<<29)
#define NV_TX2_RETRYERROR       (1<<18)
#define NV_TX2_LASTPACKET1      (1<<23)
#define NV_TX2_DEFERRED         (1<<25)
#define NV_TX2_CARRIERLOST      (1<<26)
#define NV_TX2_LATECOLLISION    (1<<27)
#define NV_TX2_UNDERFLOW        (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR            (1<<30)
#define NV_TX2_VALID            (1<<31)

#define NV_RX_DESCRIPTORVALID   (1<<16)
#define NV_RX_MISSEDFRAME       (1<<17)
#define NV_RX_SUBSTRACT1        (1<<18)
#define NV_RX_ERROR1            (1<<23)
#define NV_RX_ERROR2            (1<<24)
#define NV_RX_ERROR3            (1<<25)
#define NV_RX_ERROR4            (1<<26)
#define NV_RX_CRCERR            (1<<27)
#define NV_RX_OVERFLOW          (1<<28)
#define NV_RX_FRAMINGERR        (1<<29)
#define NV_RX_ERROR             (1<<30)
#define NV_RX_AVAIL             (1<<31)

#define NV_RX2_CHECKSUMMASK     (0x1C000000)
#define NV_RX2_CHECKSUMOK1      (0x10000000)
#define NV_RX2_CHECKSUMOK2      (0x14000000)
#define NV_RX2_CHECKSUMOK3      (0x18000000)
#define NV_RX2_DESCRIPTORVALID  (1<<29)
#define NV_RX2_SUBSTRACT1       (1<<25)
#define NV_RX2_ERROR1           (1<<18)
#define NV_RX2_ERROR2           (1<<19)
#define NV_RX2_ERROR3           (1<<20)
#define NV_RX2_ERROR4           (1<<21)
#define NV_RX2_CRCERR           (1<<22)
#define NV_RX2_OVERFLOW         (1<<23)
#define NV_RX2_FRAMINGERR       (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR            (1<<30)
#define NV_RX2_AVAIL            (1<<31)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ            0x270

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY     4
#define NV_TXSTOP_DELAY1        10
#define NV_TXSTOP_DELAY1MAX     500000
#define NV_TXSTOP_DELAY2        100
#define NV_RXSTOP_DELAY1        10
#define NV_RXSTOP_DELAY1MAX     500000
#define NV_RXSTOP_DELAY2        100
#define NV_SETUP5_DELAY         5
#define NV_SETUP5_DELAYMAX      50000
#define NV_POWERUP_DELAY        5
#define NV_POWERUP_DELAYMAX     5000
#define NV_MIIBUSY_DELAY        50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX      10000

#define NV_WAKEUPPATTERNS       5
#define NV_WAKEUPMASKENTRIES    4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO       (5*HZ)

#define RX_RING         128
#define TX_RING         64
/*
 * If your nic mysteriously hangs then try to reduce the limits
 * to 1/0: It might be required to set NV_TX_LASTPACKET in the
 * last valid ring entry. But this would be impossible to
 * implement - probably a disassembly error.
 */
#define TX_LIMIT_STOP   63
#define TX_LIMIT_START  62

/* rx/tx mac addr + type + vlan + align + slack */
#define RX_NIC_BUFSIZE          (ETH_DATA_LEN + 64)
/* even more slack */
#define RX_ALLOC_BUFSIZE        (ETH_DATA_LEN + 128)

#define OOM_REFILL      (1+HZ/20)
#define POLL_WAIT       (1+HZ/100)
#define LINK_TIMEOUT    (3*HZ)

/*
 * desc_ver values:
 * This field has two purposes:
 * - Newer nics use a different ring layout. The layout is selected by
 *   comparing np->desc_ver with DESC_VER_xy.
 * - It contains bits that are forced on when writing to NvRegTxRxControl.
 */
#define DESC_VER_1      0x0
#define DESC_VER_2      (0x02100|NVREG_TXRXCTL_RXCHECK)

/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA  0x03f1
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHY_INIT1       0x0f000
#define PHY_INIT2       0x0e00
#define PHY_INIT3       0x01000
#define PHY_INIT4       0x0200
#define PHY_INIT5       0x0004
#define PHY_INIT6       0x02000
#define PHY_GIGABIT     0x0100

#define PHY_TIMEOUT     0x1
#define PHY_ERROR       0x2

#define PHY_100 0x1
#define PHY_1000        0x2
#define PHY_HALF        0x100

/* FIXME: MII defines that should be added to <linux/mii.h> */
#define MII_1000BT_CR   0x09
#define MII_1000BT_SR   0x0a
#define ADVERTISE_1000FULL      0x0200
#define ADVERTISE_1000HALF      0x0100
#define LPA_1000FULL    0x0800
#define LPA_1000HALF    0x0400


/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *      by the arch code for interrupts.
 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
 *      needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
 */

/* in dev: base, irq */
struct fe_priv {
        spinlock_t lock;

        /* General data:
         * Locking: spin_lock(&np->lock); */
        struct net_device_stats stats;
        int in_shutdown;
        u32 linkspeed;
        int duplex;
        int autoneg;
        int fixed_mode;
        int phyaddr;
        int wolenabled;
        unsigned int phy_oui;
        u16 gigabit;

        /* General data: RO fields */
        dma_addr_t ring_addr;
        struct pci_dev *pci_dev;
        u32 orig_mac[2];
        u32 irqmask;
        u32 desc_ver;

        void __iomem *base;

        /* rx specific fields.
         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
         */
        struct ring_desc *rx_ring;
        unsigned int cur_rx, refill_rx;
        struct sk_buff *rx_skbuff[RX_RING];
        dma_addr_t rx_dma[RX_RING];
        unsigned int rx_buf_sz;
        struct timer_list oom_kick;
        struct timer_list nic_poll;

        /* media detection workaround.
         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
         */
        int need_linktimer;
        unsigned long link_timeout;
        /*
         * tx specific fields.
         */
        struct ring_desc *tx_ring;
        unsigned int next_tx, nic_tx;
        struct sk_buff *tx_skbuff[TX_RING];
        dma_addr_t tx_dma[TX_RING];
        u32 tx_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
        return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
        return get_nvpriv(dev)->base;
}

static inline void pci_push(u8 __iomem *base)
{
        /* force out pending posted writes */
        readl(base);
}

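/* nv_descr_getlength: extract the buffer length from a descriptor's
 * FlagLen word, masking off the flag bits of the ring layout in use.
 */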
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
        return le32_to_cpu(prd->FlagLen)
                & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

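/* reg_delay: poll a register until (value & mask) == target, waiting
 * 'delay' usec between reads for at most 'delaymax' usec in total.
 * Returns 1 (and prints msg, if given) on timeout, 0 on success.
 */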
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
                                int delay, int delaymax, const char *msg)
{
        u8 __iomem *base = get_hwbase(dev);

        pci_push(base);
        do {
                udelay(delay);
                delaymax -= delay;
                if (delaymax < 0) {
                        if (msg)
                                printk(msg);
                        return 1;
                }
        } while ((readl(base + offset) & mask) != target);
        return 0;
}

#define MII_READ        (-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
        u8 __iomem *base = get_hwbase(dev);
        u32 reg;
        int retval;

        writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

        reg = readl(base + NvRegMIIControl);
        if (reg & NVREG_MIICTL_INUSE) {
                writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
                udelay(NV_MIIBUSY_DELAY);
        }

        reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
        if (value != MII_READ) {
                writel(value, base + NvRegMIIData);
                reg |= NVREG_MIICTL_WRITE;
        }
        writel(reg, base + NvRegMIIControl);

        if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
                        NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
                dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
                                dev->name, miireg, addr);
                retval = -1;
        } else if (value != MII_READ) {
                /* it was a write operation - fewer failures are detectable */
                dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
                                dev->name, value, miireg, addr);
                retval = 0;
        } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
                dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
                                dev->name, miireg, addr);
                retval = -1;
        } else {
                retval = readl(base + NvRegMIIData);
                dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
                                dev->name, miireg, addr, retval);
        }

        return retval;
}

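/* phy_reset: soft-reset the PHY by setting BMCR_RESET and polling
 * until the bit self-clears. Returns 0 on success, -1 on failure.
 */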
static int phy_reset(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        u32 miicontrol;
        unsigned int tries = 0;

        miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        miicontrol |= BMCR_RESET;
        if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
                return -1;
        }

        /* wait for 500ms */
        msleep(500);

        /* must wait till reset is deasserted */
        while (miicontrol & BMCR_RESET) {
                msleep(10);
                miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
                /* FIXME: 100 tries seem excessive */
                if (tries++ > 100)
                        return -1;
        }
        return 0;
}

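/* phy_init: bring the PHY into a known state: program the advertisement
 * registers (gigabit too, if the PHY supports it), reset the PHY, apply
 * the Cicada vendor specific fixups and restart autonegotiation.
 * Returns 0 on success, PHY_ERROR otherwise.
 */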
static int phy_init(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

        /* set advertise register */
        reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
        reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
        if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
                printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
                return PHY_ERROR;
        }

        /* get phy interface type */
        phyinterface = readl(base + NvRegPhyInterface);

        /* see if gigabit phy */
        mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
        if (mii_status & PHY_GIGABIT) {
                np->gigabit = PHY_GIGABIT;
                mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
                mii_control_1000 &= ~ADVERTISE_1000HALF;
                if (phyinterface & PHY_RGMII)
                        mii_control_1000 |= ADVERTISE_1000FULL;
                else
                        mii_control_1000 &= ~ADVERTISE_1000FULL;

                if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
                        printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
        }
        else
                np->gigabit = 0;

        /* reset the phy */
        if (phy_reset(dev)) {
                printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
                return PHY_ERROR;
        }

        /* phy vendor specific configuration */
        if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
                phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
                phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
                phy_reserved |= (PHY_INIT3 | PHY_INIT4);
                if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
                        printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
                phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
                phy_reserved |= PHY_INIT5;
                if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
                        printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
        }
        if (np->phy_oui == PHY_OUI_CICADA) {
                phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
                phy_reserved |= PHY_INIT6;
                if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
                        printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
        }

        /* restart auto negotiation */
        mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
        if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
                return PHY_ERROR;
        }

        return 0;
}

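/* nv_start_rx: (re)start the receiver. NvRegLinkSpeed is rewritten here
 * because the hardware apparently latches the link speed when rx is
 * started (see the 0.13 changelog entry), so rx must be restarted after
 * every link change.
 */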
static void nv_start_rx(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);

        dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
        /* Already running? Stop it. */
        if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
                writel(0, base + NvRegReceiverControl);
                pci_push(base);
        }
        writel(np->linkspeed, base + NvRegLinkSpeed);
        pci_push(base);
        writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
        dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
                                dev->name, np->duplex, np->linkspeed);
        pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
        u8 __iomem *base = get_hwbase(dev);

        dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
        writel(0, base + NvRegReceiverControl);
        reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
                        NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
                        KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

        udelay(NV_RXSTOP_DELAY2);
        writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
        u8 __iomem *base = get_hwbase(dev);

        dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
        writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
        pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
        u8 __iomem *base = get_hwbase(dev);

        dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
        writel(0, base + NvRegTransmitterControl);
        reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
                        NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
                        KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

        udelay(NV_TXSTOP_DELAY2);
        writel(0, base + NvRegUnknownTransmitterReg);
}

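/* nv_txrx_reset: pulse the reset bit in NvRegTxRxControl, keeping the
 * descriptor version bits set during the whole sequence.
 */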
static void nv_txrx_reset(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);

        dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
        writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl);
        pci_push(base);
        udelay(NV_TXRX_RESET_DELAY);
        writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl);
        pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);

        /* It seems that the nic always generates interrupts and doesn't
         * accumulate errors internally. Thus the current values in np->stats
         * are already up to date.
         */
        return &np->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        unsigned int refill_rx = np->refill_rx;
        int nr;

        while (np->cur_rx != refill_rx) {
                struct sk_buff *skb;

                nr = refill_rx % RX_RING;
                if (np->rx_skbuff[nr] == NULL) {

                        skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
                        if (!skb)
                                break;

                        skb->dev = dev;
                        np->rx_skbuff[nr] = skb;
                } else {
                        skb = np->rx_skbuff[nr];
                }
                np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
                                                PCI_DMA_FROMDEVICE);
                np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
                wmb();
                np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
                dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
                                        dev->name, refill_rx);
                refill_rx++;
        }
        np->refill_rx = refill_rx;
        if (np->cur_rx - refill_rx == RX_RING)
                return 1;
        return 0;
}

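/* nv_do_rx_refill: oom_kick timer callback. Retry filling the rx ring
 * with the nic interrupt disabled; if allocation still fails and we are
 * not shutting down, rearm the timer and try again later.
 */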
static void nv_do_rx_refill(unsigned long data)
{
        struct net_device *dev = (struct net_device *) data;
        struct fe_priv *np = get_nvpriv(dev);

        disable_irq(dev->irq);
        if (nv_alloc_rx(dev)) {
                spin_lock(&np->lock);
                if (!np->in_shutdown)
                        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
                spin_unlock(&np->lock);
        }
        enable_irq(dev->irq);
}

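/* nv_init_ring: reset both rings to an empty state and hand the rx
 * buffers to the hardware. Returns nonzero if the rx ring could not
 * be filled (see nv_alloc_rx).
 */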
static int nv_init_ring(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        int i;

        np->next_tx = np->nic_tx = 0;
        for (i = 0; i < TX_RING; i++)
                np->tx_ring[i].FlagLen = 0;

        np->cur_rx = RX_RING;
        np->refill_rx = 0;
        for (i = 0; i < RX_RING; i++)
                np->rx_ring[i].FlagLen = 0;
        return nv_alloc_rx(dev);
}

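/* nv_drain_tx/nv_drain_rx: free all skbs still attached to a ring and
 * undo their pci mappings. Must only run while the nic is stopped;
 * dropped tx packets are accounted in stats.tx_dropped.
 */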
static void nv_drain_tx(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        int i;
        for (i = 0; i < TX_RING; i++) {
                np->tx_ring[i].FlagLen = 0;
                if (np->tx_skbuff[i]) {
                        pci_unmap_single(np->pci_dev, np->tx_dma[i],
                                                np->tx_skbuff[i]->len,
                                                PCI_DMA_TODEVICE);
                        dev_kfree_skb(np->tx_skbuff[i]);
                        np->tx_skbuff[i] = NULL;
                        np->stats.tx_dropped++;
                }
        }
}

static void nv_drain_rx(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        int i;
        for (i = 0; i < RX_RING; i++) {
                np->rx_ring[i].FlagLen = 0;
                wmb();
                if (np->rx_skbuff[i]) {
                        pci_unmap_single(np->pci_dev, np->rx_dma[i],
                                                np->rx_skbuff[i]->len,
                                                PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(np->rx_skbuff[i]);
                        np->rx_skbuff[i] = NULL;
                }
        }
}

static void drain_ring(struct net_device *dev)
{
        nv_drain_tx(dev);
        nv_drain_rx(dev);
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with dev->xmit_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        int nr = np->next_tx % TX_RING;

        np->tx_skbuff[nr] = skb;
        np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
                                        PCI_DMA_TODEVICE);

        np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);

        spin_lock_irq(&np->lock);
        wmb();
        np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
        dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d queued for transmission.\n",
                                dev->name, np->next_tx);
        {
                int j;
                for (j=0; j<64; j++) {
                        if ((j%16) == 0)
                                dprintk("\n%03x:", j);
                        dprintk(" %02x", ((unsigned char*)skb->data)[j]);
                }
                dprintk("\n");
        }

        np->next_tx++;

        dev->trans_start = jiffies;
        if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
                netif_stop_queue(dev);
        spin_unlock_irq(&np->lock);
        writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
        pci_push(get_hwbase(dev));
        return 0;
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        u32 Flags;
        int i;

        while (np->nic_tx != np->next_tx) {
                i = np->nic_tx % TX_RING;

                Flags = le32_to_cpu(np->tx_ring[i].FlagLen);

                dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
                                        dev->name, np->nic_tx, Flags);
                if (Flags & NV_TX_VALID)
                        break;
                if (np->desc_ver == DESC_VER_1) {
                        if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
                                                        NV_TX_UNDERFLOW|NV_TX_ERROR)) {
                                if (Flags & NV_TX_UNDERFLOW)
                                        np->stats.tx_fifo_errors++;
                                if (Flags & NV_TX_CARRIERLOST)
                                        np->stats.tx_carrier_errors++;
                                np->stats.tx_errors++;
                        } else {
                                np->stats.tx_packets++;
                                np->stats.tx_bytes += np->tx_skbuff[i]->len;
                        }
                } else {
                        if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
                                                        NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
                                if (Flags & NV_TX2_UNDERFLOW)
                                        np->stats.tx_fifo_errors++;
                                if (Flags & NV_TX2_CARRIERLOST)
                                        np->stats.tx_carrier_errors++;
                                np->stats.tx_errors++;
                        } else {
                                np->stats.tx_packets++;
                                np->stats.tx_bytes += np->tx_skbuff[i]->len;
                        }
                }
                pci_unmap_single(np->pci_dev, np->tx_dma[i],
                                        np->tx_skbuff[i]->len,
                                        PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(np->tx_skbuff[i]);
                np->tx_skbuff[i] = NULL;
                np->nic_tx++;
        }
        if (np->next_tx - np->nic_tx < TX_LIMIT_START)
                netif_wake_queue(dev);
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with dev->xmit_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);

        dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name,
                        readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);

        spin_lock_irq(&np->lock);

        /* 1) stop tx engine */
        nv_stop_tx(dev);

        /* 2) check that the packets were not sent already: */
        nv_tx_done(dev);

        /* 3) if there are dead entries: clear everything */
        if (np->next_tx != np->nic_tx) {
                printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
                nv_drain_tx(dev);
                np->next_tx = np->nic_tx = 0;
                writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
                netif_wake_queue(dev);
        }

        /* 4) restart tx engine */
        nv_start_tx(dev);
        spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
        int hdrlen;     /* length of the 802 header */
        int protolen;   /* length as stored in the proto field */

        /* 1) calculate len according to header */
        if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
                protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
                hdrlen = VLAN_HLEN;
        } else {
                protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
                hdrlen = ETH_HLEN;
        }
        dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
                                dev->name, datalen, protolen, hdrlen);
        if (protolen > ETH_DATA_LEN)
                return datalen; /* Value in proto field not a len, no checks possible */

        protolen += hdrlen;
        /* consistency checks: */
        if (datalen > ETH_ZLEN) {
                if (datalen >= protolen) {
                        /* more data on wire than in 802 header, trim off
                         * additional data.
                         */
                        dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
                                        dev->name, protolen);
                        return protolen;
                } else {
                        /* less data on wire than mentioned in header.
                         * Discard the packet.
                         */
                        dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
                                        dev->name);
                        return -1;
                }
        } else {
                /* short packet. Accept only if 802 values are also short */
                if (protolen > ETH_ZLEN) {
                        dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
                                        dev->name);
                        return -1;
                }
                dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
                                dev->name, datalen);
                return datalen;
        }
}

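/* nv_rx_process: walk the rx ring, account errored packets in the
 * stats and hand good packets to netif_rx. Called from irq context.
 */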
static void nv_rx_process(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        u32 Flags;

        for (;;) {
                struct sk_buff *skb;
                int len;
                int i;
                if (np->cur_rx - np->refill_rx >= RX_RING)
                        break;  /* we scanned the whole ring - do not continue */

                i = np->cur_rx % RX_RING;
                Flags = le32_to_cpu(np->rx_ring[i].FlagLen);
                len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver);

                dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
                                        dev->name, np->cur_rx, Flags);

                if (Flags & NV_RX_AVAIL)
                        break;  /* still owned by hardware, */

                /*
                 * the packet is for us - immediately tear down the pci mapping.
                 * TODO: check if a prefetch of the first cacheline improves
                 * the performance.
                 */
                pci_unmap_single(np->pci_dev, np->rx_dma[i],
                                np->rx_skbuff[i]->len,
                                PCI_DMA_FROMDEVICE);

                {
                        int j;
                        dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
                        for (j=0; j<64; j++) {
                                if ((j%16) == 0)
                                        dprintk("\n%03x:", j);
                                dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
                        }
                        dprintk("\n");
                }
                /* look at what we actually got: */
                if (np->desc_ver == DESC_VER_1) {
                        if (!(Flags & NV_RX_DESCRIPTORVALID))
                                goto next_pkt;

                        if (Flags & NV_RX_MISSEDFRAME) {
                                np->stats.rx_missed_errors++;
                                np->stats.rx_errors++;
                                goto next_pkt;
                        }
                        if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
                                np->stats.rx_errors++;
                                goto next_pkt;
                        }
                        if (Flags & NV_RX_CRCERR) {
                                np->stats.rx_crc_errors++;
                                np->stats.rx_errors++;
                                goto next_pkt;
                        }
                        if (Flags & NV_RX_OVERFLOW) {
                                np->stats.rx_over_errors++;
                                np->stats.rx_errors++;
                                goto next_pkt;
                        }
                        if (Flags & NV_RX_ERROR4) {
                                len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
                                if (len < 0) {
                                        np->stats.rx_errors++;
                                        goto next_pkt;
                                }
                        }
                        /* framing errors are soft errors. */
                        if (Flags & NV_RX_FRAMINGERR) {
                                if (Flags & NV_RX_SUBSTRACT1) {
                                        len--;
                                }
                        }
                } else {
                        if (!(Flags & NV_RX2_DESCRIPTORVALID))
                                goto next_pkt;

                        if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
                                np->stats.rx_errors++;
                                goto next_pkt;
                        }
                        if (Flags & NV_RX2_CRCERR) {
                                np->stats.rx_crc_errors++;
                                np->stats.rx_errors++;
                                goto next_pkt;
                        }
                        if (Flags & NV_RX2_OVERFLOW) {
                                np->stats.rx_over_errors++;
                                np->stats.rx_errors++;
                                goto next_pkt;
                        }
                        if (Flags & NV_RX2_ERROR4) {
                                len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
                                if (len < 0) {
                                        np->stats.rx_errors++;
                                        goto next_pkt;
                                }
                        }
                        /* framing errors are soft errors */
                        if (Flags & NV_RX2_FRAMINGERR) {
                                if (Flags & NV_RX2_SUBSTRACT1) {
                                        len--;
                                }
                        }
                        Flags &= NV_RX2_CHECKSUMMASK;
                        if (Flags == NV_RX2_CHECKSUMOK1 ||
                                        Flags == NV_RX2_CHECKSUMOK2 ||
                                        Flags == NV_RX2_CHECKSUMOK3) {
                                dprintk(KERN_DEBUG "%s: hw checksum hit!\n", dev->name);
                                np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
                        } else {
                                dprintk(KERN_DEBUG "%s: hw checksum miss!\n", dev->name);
                }
                /* got a valid packet - forward it to the network core */
                skb = np->rx_skbuff[i];
                np->rx_skbuff[i] = NULL;

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, dev);
                dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
                                        dev->name, np->cur_rx, len, skb->protocol);
                netif_rx(skb);
                dev->last_rx = jiffies;
                np->stats.rx_packets++;
                np->stats.rx_bytes += len;
next_pkt:
                np->cur_rx++;
        }
}

/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu > ETH_DATA_LEN)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

/*
 * nv_set_multicast: dev->set_multicast function
 * Called with dev->xmit_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 addr[2];
        u32 mask[2];
        u32 pff;

        memset(addr, 0, sizeof(addr));
        memset(mask, 0, sizeof(mask));

        if (dev->flags & IFF_PROMISC) {
                printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                pff = NVREG_PFF_PROMISC;
        } else {
                pff = NVREG_PFF_MYADDR;

                if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
                        u32 alwaysOff[2];
                        u32 alwaysOn[2];

                        alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
                        if (dev->flags & IFF_ALLMULTI) {
                                alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
                        } else {
                                struct dev_mc_list *walk;

                                walk = dev->mc_list;
                                while (walk != NULL) {
                                        u32 a, b;
                                        a = le32_to_cpu(*(u32 *) walk->dmi_addr);
                                        b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
                                        alwaysOn[0] &= a;
                                        alwaysOff[0] &= ~a;
                                        alwaysOn[1] &= b;
                                        alwaysOff[1] &= ~b;
                                        walk = walk->next;
                                }
                        }
                        addr[0] = alwaysOn[0];
                        addr[1] = alwaysOn[1];
                        mask[0] = alwaysOn[0] | alwaysOff[0];
                        mask[1] = alwaysOn[1] | alwaysOff[1];
                }
        }
        addr[0] |= NVREG_MCASTADDRA_FORCE;
        pff |= NVREG_PFF_ALWAYS;
        spin_lock_irq(&np->lock);
        nv_stop_rx(dev);
        writel(addr[0], base + NvRegMulticastAddrA);
        writel(addr[1], base + NvRegMulticastAddrB);
        writel(mask[0], base + NvRegMulticastMaskA);
        writel(mask[1], base + NvRegMulticastMaskB);
        writel(pff, base + NvRegPacketFilterFlags);
        dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
                dev->name);
        nv_start_rx(dev);
        spin_unlock_irq(&np->lock);
}

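/* nv_update_linkspeed: derive the current speed and duplex from the PHY
 * (or from np->fixed_mode if autoneg is off) and program NvRegLinkSpeed,
 * NvRegPhyInterface and NvRegMisc1 to match. Returns nonzero if a usable
 * link was established, 0 otherwise.
 */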
1285 static int nv_update_linkspeed(struct net_device *dev)
1286 {
1287         struct fe_priv *np = get_nvpriv(dev);
1288         u8 __iomem *base = get_hwbase(dev);
1289         int adv, lpa;
1290         int newls = np->linkspeed;
1291         int newdup = np->duplex;
1292         int mii_status;
1293         int retval = 0;
1294         u32 control_1000, status_1000, phyreg;
1295
1296         /* BMSR_LSTATUS is latched, read it twice:
1297          * we want the current value.
1298          */
1299         mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1300         mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1301
1302         if (!(mii_status & BMSR_LSTATUS)) {
1303                 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
1304                                 dev->name);
1305                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1306                 newdup = 0;
1307                 retval = 0;
1308                 goto set_speed;
1309         }
1310
1311         if (np->autoneg == 0) {
1312                 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
1313                                 dev->name, np->fixed_mode);
1314                 if (np->fixed_mode & LPA_100FULL) {
1315                         newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1316                         newdup = 1;
1317                 } else if (np->fixed_mode & LPA_100HALF) {
1318                         newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1319                         newdup = 0;
1320                 } else if (np->fixed_mode & LPA_10FULL) {
1321                         newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1322                         newdup = 1;
1323                 } else {
1324                         newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1325                         newdup = 0;
1326                 }
1327                 retval = 1;
1328                 goto set_speed;
1329         }
1330         /* check auto negotiation is complete */
1331         if (!(mii_status & BMSR_ANEGCOMPLETE)) {
1332                 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
1333                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1334                 newdup = 0;
1335                 retval = 0;
1336                 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
1337                 goto set_speed;
1338         }
1339
1340         retval = 1;
1341         if (np->gigabit == PHY_GIGABIT) {
1342                 control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1343                 status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
1344
1345                 if ((control_1000 & ADVERTISE_1000FULL) &&
1346                         (status_1000 & LPA_1000FULL)) {
1347                         dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
1348                                 dev->name);
1349                         newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
1350                         newdup = 1;
1351                         goto set_speed;
1352                 }
1353         }
1354
1355         adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1356         lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
1357         dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
1358                                 dev->name, adv, lpa);
1359
1360         /* FIXME: handle parallel detection properly */
1361         lpa = lpa & adv;
1362         if (lpa & LPA_100FULL) {
1363                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1364                 newdup = 1;
1365         } else if (lpa & LPA_100HALF) {
1366                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1367                 newdup = 0;
1368         } else if (lpa & LPA_10FULL) {
1369                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1370                 newdup = 1;
1371         } else if (lpa & LPA_10HALF) {
1372                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1373                 newdup = 0;
1374         } else {
1375                 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
1376                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1377                 newdup = 0;
1378         }
1379
1380 set_speed:
1381         if (np->duplex == newdup && np->linkspeed == newls)
1382                 return retval;
1383
1384         dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
1385                         dev->name, np->linkspeed, np->duplex, newls, newdup);
1386
1387         np->duplex = newdup;
1388         np->linkspeed = newls;
1389
1390         if (np->gigabit == PHY_GIGABIT) {
1391                 phyreg = readl(base + NvRegRandomSeed);
1392                 phyreg &= ~(0x3FF00);
1393                 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
1394                         phyreg |= NVREG_RNDSEED_FORCE3;
1395                 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
1396                         phyreg |= NVREG_RNDSEED_FORCE2;
1397                 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
1398                         phyreg |= NVREG_RNDSEED_FORCE;
1399                 writel(phyreg, base + NvRegRandomSeed);
1400         }
1401
1402         phyreg = readl(base + NvRegPhyInterface);
1403         phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
1404         if (np->duplex == 0)
1405                 phyreg |= PHY_HALF;
1406         if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
1407                 phyreg |= PHY_100;
1408         else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
1409                 phyreg |= PHY_1000;
1410         writel(phyreg, base + NvRegPhyInterface);
1411
1412         writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
1413                 base + NvRegMisc1);
1414         pci_push(base);
1415         writel(np->linkspeed, base + NvRegLinkSpeed);
1416         pci_push(base);
1417
1418         return retval;
1419 }
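
/*
 * Illustrative sketch, not part of the driver: the latched-bit read
 * pattern used at the top of nv_update_linkspeed(). BMSR_LSTATUS
 * latches low on link loss, so the first read may still report a
 * failure that has since recovered; only the second read is current.
 * The helper name is hypothetical; mii_rw() is the driver's accessor.
 */
static int nv_phy_link_now(struct net_device *dev, int phyaddr)
{
	mii_rw(dev, phyaddr, MII_BMSR, MII_READ);	/* clears the latch */
	return !!(mii_rw(dev, phyaddr, MII_BMSR, MII_READ) & BMSR_LSTATUS);
}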
1420
1421 static void nv_linkchange(struct net_device *dev)
1422 {
1423         if (nv_update_linkspeed(dev)) {
1424                 if (netif_carrier_ok(dev)) {
1425                         nv_stop_rx(dev);
1426                 } else {
1427                         netif_carrier_on(dev);
1428                         printk(KERN_INFO "%s: link up.\n", dev->name);
1429                 }
1430                 nv_start_rx(dev);
1431         } else {
1432                 if (netif_carrier_ok(dev)) {
1433                         netif_carrier_off(dev);
1434                         printk(KERN_INFO "%s: link down.\n", dev->name);
1435                         nv_stop_rx(dev);
1436                 }
1437         }
1438 }
1439
1440 static void nv_link_irq(struct net_device *dev)
1441 {
1442         u8 __iomem *base = get_hwbase(dev);
1443         u32 miistat;
1444
1445         miistat = readl(base + NvRegMIIStatus);
1446         writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1447         dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
1448
1449         if (miistat & (NVREG_MIISTAT_LINKCHANGE))
1450                 nv_linkchange(dev);
1451         dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
1452 }
1453
1454 static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1455 {
1456         struct net_device *dev = (struct net_device *) data;
1457         struct fe_priv *np = get_nvpriv(dev);
1458         u8 __iomem *base = get_hwbase(dev);
1459         u32 events;
1460         int i;
1461
1462         dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
1463
1464         for (i=0; ; i++) {
1465                 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
1466                 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1467                 pci_push(base);
1468                 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
1469                 if (!(events & np->irqmask))
1470                         break;
1471
1472                 if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2|NVREG_IRQ_TX_ERR)) {
1473                         spin_lock(&np->lock);
1474                         nv_tx_done(dev);
1475                         spin_unlock(&np->lock);
1476                 }
1477
1478                 if (events & (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) {
1479                         nv_rx_process(dev);
1480                         if (nv_alloc_rx(dev)) {
1481                                 spin_lock(&np->lock);
1482                                 if (!np->in_shutdown)
1483                                         mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1484                                 spin_unlock(&np->lock);
1485                         }
1486                 }
1487
1488                 if (events & NVREG_IRQ_LINK) {
1489                         spin_lock(&np->lock);
1490                         nv_link_irq(dev);
1491                         spin_unlock(&np->lock);
1492                 }
1493                 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
1494                         spin_lock(&np->lock);
1495                         nv_linkchange(dev);
1496                         spin_unlock(&np->lock);
1497                         np->link_timeout = jiffies + LINK_TIMEOUT;
1498                 }
1499                 if (events & (NVREG_IRQ_TX_ERR)) {
1500                         dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
1501                                                 dev->name, events);
1502                 }
1503                 if (events & (NVREG_IRQ_UNKNOWN)) {
1504                         printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report.\n",
1505                                                 dev->name, events);
1506                 }
1507                 if (i > max_interrupt_work) {
1508                         spin_lock(&np->lock);
1509                         /* disable interrupts on the nic */
1510                         writel(0, base + NvRegIrqMask);
1511                         pci_push(base);
1512
1513                         if (!np->in_shutdown)
1514                                 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
1515                         printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
1516                         spin_unlock(&np->lock);
1517                         break;
1518                 }
1519
1520         }
1521         dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
1522
1523         return IRQ_RETVAL(i);
1524 }
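
/*
 * Illustrative sketch, not part of the driver: the polled link check
 * that the handler above runs on every pass for nics flagged
 * DEV_NEED_LINKTIMER, shown on its own. The function name is
 * hypothetical; callers would hold np->lock, as nv_nic_irq() does.
 */
static void nv_link_poll(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!np->need_linktimer)
		return;
	if (time_after(jiffies, np->link_timeout)) {
		nv_linkchange(dev);	/* re-read PHY, resync carrier/rx */
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
}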
1525
1526 static void nv_do_nic_poll(unsigned long data)
1527 {
1528         struct net_device *dev = (struct net_device *) data;
1529         struct fe_priv *np = get_nvpriv(dev);
1530         u8 __iomem *base = get_hwbase(dev);
1531
1532         disable_irq(dev->irq);
1533         /* FIXME: Do we need synchronize_irq(dev->irq) here? */
1534         /*
1535          * Reenable interrupts on the nic before calling nv_nic_irq():
1536          * the handler may legitimately mask them again (max_interrupt_work).
1537          */
1538         writel(np->irqmask, base + NvRegIrqMask);
1539         pci_push(base);
1540         nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
1541         enable_irq(dev->irq);
1542 }
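
/*
 * Taken together, the max_interrupt_work cap in nv_nic_irq() and the
 * nv_do_nic_poll() timer form the irq-storm recovery path: the handler
 * masks all nic interrupts once a single invocation loops too often,
 * and the timer later unmasks them and drains whatever arrived in the
 * meantime.
 */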
1543
1544 #ifdef CONFIG_NET_POLL_CONTROLLER
1545 static void nv_poll_controller(struct net_device *dev)
1546 {
1547         nv_do_nic_poll((unsigned long) dev);
1548 }
1549 #endif
1550
1551 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1552 {
1553         struct fe_priv *np = get_nvpriv(dev);
1554         strcpy(info->driver, "forcedeth");
1555         strcpy(info->version, FORCEDETH_VERSION);
1556         strcpy(info->bus_info, pci_name(np->pci_dev));
1557 }
1558
1559 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
1560 {
1561         struct fe_priv *np = get_nvpriv(dev);
1562         wolinfo->supported = WAKE_MAGIC;
1563
1564         spin_lock_irq(&np->lock);
1565         if (np->wolenabled)
1566                 wolinfo->wolopts = WAKE_MAGIC;
1567         spin_unlock_irq(&np->lock);
1568 }
1569
1570 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
1571 {
1572         struct fe_priv *np = get_nvpriv(dev);
1573         u8 __iomem *base = get_hwbase(dev);
1574
1575         spin_lock_irq(&np->lock);
1576         if (wolinfo->wolopts == 0) {
1577                 writel(0, base + NvRegWakeUpFlags);
1578                 np->wolenabled = 0;
1579         }
1580         if (wolinfo->wolopts & WAKE_MAGIC) {
1581                 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
1582                 np->wolenabled = 1;
1583         }
1584         spin_unlock_irq(&np->lock);
1585         return 0;
1586 }
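
/*
 * The two callbacks above back the WOL field of `ethtool <dev>` and
 * `ethtool -s <dev> wol g|d`; only magic-packet wake is supported.
 */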
1587
1588 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1589 {
1590         struct fe_priv *np = netdev_priv(dev);
1591         int adv;
1592
1593         spin_lock_irq(&np->lock);
1594         ecmd->port = PORT_MII;
1595         if (!netif_running(dev)) {
1596                 /* We do not track link speed / duplex setting if the
1597                  * interface is disabled. Force a link check. */
1598                 nv_update_linkspeed(dev);
1599         }
1600         switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
1601                 case NVREG_LINKSPEED_10:
1602                         ecmd->speed = SPEED_10;
1603                         break;
1604                 case NVREG_LINKSPEED_100:
1605                         ecmd->speed = SPEED_100;
1606                         break;
1607                 case NVREG_LINKSPEED_1000:
1608                         ecmd->speed = SPEED_1000;
1609                         break;
1610         }
1611         ecmd->duplex = DUPLEX_HALF;
1612         if (np->duplex)
1613                 ecmd->duplex = DUPLEX_FULL;
1614
1615         ecmd->autoneg = np->autoneg;
1616
1617         ecmd->advertising = ADVERTISED_MII;
1618         if (np->autoneg) {
1619                 ecmd->advertising |= ADVERTISED_Autoneg;
1620                 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1621         } else {
1622                 adv = np->fixed_mode;
1623         }
1624         if (adv & ADVERTISE_10HALF)
1625                 ecmd->advertising |= ADVERTISED_10baseT_Half;
1626         if (adv & ADVERTISE_10FULL)
1627                 ecmd->advertising |= ADVERTISED_10baseT_Full;
1628         if (adv & ADVERTISE_100HALF)
1629                 ecmd->advertising |= ADVERTISED_100baseT_Half;
1630         if (adv & ADVERTISE_100FULL)
1631                 ecmd->advertising |= ADVERTISED_100baseT_Full;
1632         if (np->autoneg && np->gigabit == PHY_GIGABIT) {
1633                 adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1634                 if (adv & ADVERTISE_1000FULL)
1635                         ecmd->advertising |= ADVERTISED_1000baseT_Full;
1636         }
1637
1638         ecmd->supported = (SUPPORTED_Autoneg |
1639                 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
1640                 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1641                 SUPPORTED_MII);
1642         if (np->gigabit == PHY_GIGABIT)
1643                 ecmd->supported |= SUPPORTED_1000baseT_Full;
1644
1645         ecmd->phy_address = np->phyaddr;
1646         ecmd->transceiver = XCVR_EXTERNAL;
1647
1648         /* ignore maxtxpkt, maxrxpkt for now */
1649         spin_unlock_irq(&np->lock);
1650         return 0;
1651 }
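
/*
 * Illustrative sketch, not part of the driver: the MII ADVERTISE_* to
 * ethtool ADVERTISED_* translation that nv_get_settings() does inline
 * above. The helper name is hypothetical (later kernels grew a generic
 * mii_adv_to_ethtool_adv_t() for this).
 */
static u32 nv_adv_to_ethtool(int adv)
{
	u32 mask = 0;

	if (adv & ADVERTISE_10HALF)
		mask |= ADVERTISED_10baseT_Half;
	if (adv & ADVERTISE_10FULL)
		mask |= ADVERTISED_10baseT_Full;
	if (adv & ADVERTISE_100HALF)
		mask |= ADVERTISED_100baseT_Half;
	if (adv & ADVERTISE_100FULL)
		mask |= ADVERTISED_100baseT_Full;
	return mask;
}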
1652
1653 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1654 {
1655         struct fe_priv *np = netdev_priv(dev);
1656
1657         if (ecmd->port != PORT_MII)
1658                 return -EINVAL;
1659         if (ecmd->transceiver != XCVR_EXTERNAL)
1660                 return -EINVAL;
1661         if (ecmd->phy_address != np->phyaddr) {
1662                 /* TODO: support switching between multiple phys. Should be
1663                  * trivial, but not enabled due to lack of test hardware. */
1664                 return -EINVAL;
1665         }
1666         if (ecmd->autoneg == AUTONEG_ENABLE) {
1667                 u32 mask;
1668
1669                 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1670                           ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
1671                 if (np->gigabit == PHY_GIGABIT)
1672                         mask |= ADVERTISED_1000baseT_Full;
1673
1674                 if ((ecmd->advertising & mask) == 0)
1675                         return -EINVAL;
1676
1677         } else if (ecmd->autoneg == AUTONEG_DISABLE) {
1678                 /* Note: forcing speed 1000 with autonegotiation disabled is
1679                  * intentionally forbidden - no one should need that. */
1680
1681                 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
1682                         return -EINVAL;
1683                 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1684                         return -EINVAL;
1685         } else {
1686                 return -EINVAL;
1687         }
1688
1689         spin_lock_irq(&np->lock);
1690         if (ecmd->autoneg == AUTONEG_ENABLE) {
1691                 int adv, bmcr;
1692
1693                 np->autoneg = 1;
1694
1695                 /* advertise only what has been requested */
1696                 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1697                 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
1698                 if (ecmd->advertising & ADVERTISED_10baseT_Half)
1699                         adv |= ADVERTISE_10HALF;
1700                 if (ecmd->advertising & ADVERTISED_10baseT_Full)
1701                         adv |= ADVERTISE_10FULL;
1702                 if (ecmd->advertising & ADVERTISED_100baseT_Half)
1703                         adv |= ADVERTISE_100HALF;
1704                 if (ecmd->advertising & ADVERTISED_100baseT_Full)
1705                         adv |= ADVERTISE_100FULL;
1706                 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
1707
1708                 if (np->gigabit == PHY_GIGABIT) {
1709                         adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1710                         adv &= ~ADVERTISE_1000FULL;
1711                         if (ecmd->advertising & ADVERTISED_1000baseT_Full)
1712                                 adv |= ADVERTISE_1000FULL;
1713                         mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
1714                 }
1715
1716                 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1717                 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1718                 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
1719
1720         } else {
1721                 int adv, bmcr;
1722
1723                 np->autoneg = 0;
1724
1725                 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1726                 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
1727                 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
1728                         adv |= ADVERTISE_10HALF;
1729                 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
1730                         adv |= ADVERTISE_10FULL;
1731                 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
1732                         adv |= ADVERTISE_100HALF;
1733                 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
1734                         adv |= ADVERTISE_100FULL;
1735                 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
1736                 np->fixed_mode = adv;
1737
1738                 if (np->gigabit == PHY_GIGABIT) {
1739                         adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1740                         adv &= ~ADVERTISE_1000FULL;
1741                         mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
1742                 }
1743
1744                 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1745                 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
1746                 if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
1747                         bmcr |= BMCR_FULLDPLX;
1748                 if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
1749                         bmcr |= BMCR_SPEED100;
1750                 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
1751
1752                 if (netif_running(dev)) {
1753                         /* Wait a bit and then reconfigure the nic. */
1754                         udelay(10);
1755                         nv_linkchange(dev);
1756                 }
1757         }
1758         spin_unlock_irq(&np->lock);
1759
1760         return 0;
1761 }
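
/*
 * Illustrative sketch, not part of the driver: how a fixed speed/duplex
 * request maps onto BMCR at the end of nv_set_settings() above - clear
 * the autoneg and forced speed/duplex bits, then set what was asked
 * for. The helper name is hypothetical.
 */
static int nv_fixed_bmcr(int bmcr, int speed, int duplex)
{
	bmcr &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
	if (speed == SPEED_100)
		bmcr |= BMCR_SPEED100;
	if (duplex == DUPLEX_FULL)
		bmcr |= BMCR_FULLDPLX;
	return bmcr;
}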
1762
1763 static struct ethtool_ops ops = {
1764         .get_drvinfo = nv_get_drvinfo,
1765         .get_link = ethtool_op_get_link,
1766         .get_wol = nv_get_wol,
1767         .set_wol = nv_set_wol,
1768         .get_settings = nv_get_settings,
1769         .set_settings = nv_set_settings,
1770 };
1771
1772 static int nv_open(struct net_device *dev)
1773 {
1774         struct fe_priv *np = get_nvpriv(dev);
1775         u8 __iomem *base = get_hwbase(dev);
1776         int ret, oom, i;
1777
1778         dprintk(KERN_DEBUG "nv_open: begin\n");
1779
1780         /* 1) erase previous misconfiguration */
1781         /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
1782         writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
1783         writel(0, base + NvRegMulticastAddrB);
1784         writel(0, base + NvRegMulticastMaskA);
1785         writel(0, base + NvRegMulticastMaskB);
1786         writel(0, base + NvRegPacketFilterFlags);
1787
1788         writel(0, base + NvRegTransmitterControl);
1789         writel(0, base + NvRegReceiverControl);
1790
1791         writel(0, base + NvRegAdapterControl);
1792
1793         /* 2) initialize descriptor rings */
1794         oom = nv_init_ring(dev);
1795
1796         writel(0, base + NvRegLinkSpeed);
1797         writel(0, base + NvRegUnknownTransmitterReg);
1798         nv_txrx_reset(dev);
1799         writel(0, base + NvRegUnknownSetupReg6);
1800
1801         np->in_shutdown = 0;
1802
1803         /* 3) set mac address */
1804         {
1805                 u32 mac[2];
1806
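		/*
		 * Pack the address with byte 0 in the low byte of MacAddrA
		 * and bytes 4/5 in the low half of MacAddrB - byte-reversed
		 * relative to the power-on layout that nv_probe() decodes.
		 */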
1807                 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1808                                 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1809                 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1810
1811                 writel(mac[0], base + NvRegMacAddrA);
1812                 writel(mac[1], base + NvRegMacAddrB);
1813         }
1814
1815         /* 4) give hw rings */
1816         writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1817         writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1818         writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1819                 base + NvRegRingSizes);
1820
1821         /* 5) continue setup */
1822         writel(np->linkspeed, base + NvRegLinkSpeed);
1823         writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
1824         writel(np->desc_ver, base + NvRegTxRxControl);
1825         pci_push(base);
1826         writel(NVREG_TXRXCTL_BIT1|np->desc_ver, base + NvRegTxRxControl);
1827         reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
1828                         NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
1829                         KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
1830
1831         writel(0, base + NvRegUnknownSetupReg4);
1832         writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1833         writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
1834
1835         /* 6) continue setup */
1836         writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
1837         writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
1838         writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
1839         writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
1840
1841         writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
1842         get_random_bytes(&i, sizeof(i));
1843         writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
1844         writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
1845         writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
1846         writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval);
1847         writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
1848         writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
1849                         base + NvRegAdapterControl);
1850         writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
1851         writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
1852         writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
1853
1854         i = readl(base + NvRegPowerState);
1855         if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
1856                 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
1857
1858         pci_push(base);
1859         udelay(10);
1860         writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
1861
1862         writel(0, base + NvRegIrqMask);
1863         pci_push(base);
1864         writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
1865         writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1866         pci_push(base);
1867
1868         ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
1869         if (ret)
1870                 goto out_drain;
1871
1872         /* ask for interrupts */
1873         writel(np->irqmask, base + NvRegIrqMask);
1874
1875         spin_lock_irq(&np->lock);
1876         writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
1877         writel(0, base + NvRegMulticastAddrB);
1878         writel(0, base + NvRegMulticastMaskA);
1879         writel(0, base + NvRegMulticastMaskB);
1880         writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
1881         /* One manual link speed update: Interrupts are enabled, future link
1882          * speed changes cause interrupts and are handled by nv_link_irq().
1883          */
1884         {
1885                 u32 miistat;
1886                 miistat = readl(base + NvRegMIIStatus);
1887                 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1888                 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
1889         }
1890         ret = nv_update_linkspeed(dev);
1891         nv_start_rx(dev);
1892         nv_start_tx(dev);
1893         netif_start_queue(dev);
1894         if (ret) {
1895                 netif_carrier_on(dev);
1896         } else {
1897                 printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
1898                 netif_carrier_off(dev);
1899         }
1900         if (oom)
1901                 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1902         spin_unlock_irq(&np->lock);
1903
1904         return 0;
1905 out_drain:
1906         drain_ring(dev);
1907         return ret;
1908 }
1909
1910 static int nv_close(struct net_device *dev)
1911 {
1912         struct fe_priv *np = get_nvpriv(dev);
1913         u8 __iomem *base;
1914
1915         spin_lock_irq(&np->lock);
1916         np->in_shutdown = 1;
1917         spin_unlock_irq(&np->lock);
1918         synchronize_irq(dev->irq);
1919
1920         del_timer_sync(&np->oom_kick);
1921         del_timer_sync(&np->nic_poll);
1922
1923         netif_stop_queue(dev);
1924         spin_lock_irq(&np->lock);
1925         nv_stop_tx(dev);
1926         nv_stop_rx(dev);
1927         nv_txrx_reset(dev);
1928
1929         /* disable interrupts on the nic or we will lock up */
1930         base = get_hwbase(dev);
1931         writel(0, base + NvRegIrqMask);
1932         pci_push(base);
1933         dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
1934
1935         spin_unlock_irq(&np->lock);
1936
1937         free_irq(dev->irq, dev);
1938
1939         drain_ring(dev);
1940
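	/* Keep the receiver running while the interface is down so a magic
	 * packet can still wake the machine. */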
1941         if (np->wolenabled)
1942                 nv_start_rx(dev);
1943
1944         /* FIXME: power down nic */
1945
1946         return 0;
1947 }
1948
1949 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
1950 {
1951         struct net_device *dev;
1952         struct fe_priv *np;
1953         unsigned long addr;
1954         u8 __iomem *base;
1955         int err, i;
1956
1957         dev = alloc_etherdev(sizeof(struct fe_priv));
1958         err = -ENOMEM;
1959         if (!dev)
1960                 goto out;
1961
1962         np = get_nvpriv(dev);
1963         np->pci_dev = pci_dev;
1964         spin_lock_init(&np->lock);
1965         SET_MODULE_OWNER(dev);
1966         SET_NETDEV_DEV(dev, &pci_dev->dev);
1967
1968         init_timer(&np->oom_kick);
1969         np->oom_kick.data = (unsigned long) dev;
1970         np->oom_kick.function = &nv_do_rx_refill;       /* timer handler */
1971         init_timer(&np->nic_poll);
1972         np->nic_poll.data = (unsigned long) dev;
1973         np->nic_poll.function = &nv_do_nic_poll;        /* timer handler */
1974
1975         err = pci_enable_device(pci_dev);
1976         if (err) {
1977                 printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
1978                                 err, pci_name(pci_dev));
1979                 goto out_free;
1980         }
1981
1982         pci_set_master(pci_dev);
1983
1984         err = pci_request_regions(pci_dev, DRV_NAME);
1985         if (err < 0)
1986                 goto out_disable;
1987
1988         err = -EINVAL;
1989         addr = 0;
1990         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1991                 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
1992                                 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
1993                                 pci_resource_len(pci_dev, i),
1994                                 pci_resource_flags(pci_dev, i));
1995                 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
1996                                 pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
1997                         addr = pci_resource_start(pci_dev, i);
1998                         break;
1999                 }
2000         }
2001         if (i == DEVICE_COUNT_RESOURCE) {
2002                 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
2003                                         pci_name(pci_dev));
2004                 goto out_relreg;
2005         }
2006
2007         /* handle different descriptor versions */
2008         if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 ||
2009                 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 ||
2010                 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3 ||
2011                 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
2012                 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13)
2013                 np->desc_ver = DESC_VER_1;
2014         else
2015                 np->desc_ver = DESC_VER_2;
2016
2017         err = -ENOMEM;
2018         np->base = ioremap(addr, NV_PCI_REGSZ);
2019         if (!np->base)
2020                 goto out_relreg;
2021         dev->base_addr = (unsigned long)np->base;
2022         dev->irq = pci_dev->irq;
2023         np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
2024                                                 &np->ring_addr);
2025         if (!np->rx_ring)
2026                 goto out_unmap;
2027         np->tx_ring = &np->rx_ring[RX_RING];
2028
2029         dev->open = nv_open;
2030         dev->stop = nv_close;
2031         dev->hard_start_xmit = nv_start_xmit;
2032         dev->get_stats = nv_get_stats;
2033         dev->change_mtu = nv_change_mtu;
2034         dev->set_multicast_list = nv_set_multicast;
2035 #ifdef CONFIG_NET_POLL_CONTROLLER
2036         dev->poll_controller = nv_poll_controller;
2037 #endif
2038         SET_ETHTOOL_OPS(dev, &ops);
2039         dev->tx_timeout = nv_tx_timeout;
2040         dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
2041
2042         pci_set_drvdata(pci_dev, dev);
2043
2044         /* read the mac address */
2045         base = get_hwbase(dev);
2046         np->orig_mac[0] = readl(base + NvRegMacAddrA);
2047         np->orig_mac[1] = readl(base + NvRegMacAddrB);
2048
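	/*
	 * The BIOS leaves the address byte-reversed relative to the layout
	 * nv_open() programs: bytes 0/1 sit in MacAddrB and bytes 2-5 in
	 * MacAddrA, highest byte first. nv_remove() restores the original
	 * registers so a later probe sees the same layout.
	 */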
2049         dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
2050         dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
2051         dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
2052         dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
2053         dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
2054         dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
2055
2056         if (!is_valid_ether_addr(dev->dev_addr)) {
2057                 /*
2058                  * Bad MAC address. At least one BIOS sets the MAC address
2059                  * to 01:23:45:67:89:ab.
2060                  */
2061                 printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
2062                         pci_name(pci_dev),
2063                         dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2064                         dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2065                 printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
2066                 dev->dev_addr[0] = 0x00;
2067                 dev->dev_addr[1] = 0x00;
2068                 dev->dev_addr[2] = 0x6c;
2069                 get_random_bytes(&dev->dev_addr[3], 3);
2070         }
2071
2072         dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
2073                         dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2074                         dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2075
2076         /* disable WOL */
2077         writel(0, base + NvRegWakeUpFlags);
2078         np->wolenabled = 0;
2079
2080         if (np->desc_ver == DESC_VER_1) {
2081                 np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
2082                 if (id->driver_data & DEV_NEED_LASTPACKET1)
2083                         np->tx_flags |= NV_TX_LASTPACKET1;
2084         } else {
2085                 np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
2086                 if (id->driver_data & DEV_NEED_LASTPACKET1)
2087                         np->tx_flags |= NV_TX2_LASTPACKET1;
2088         }
2089         if (id->driver_data & DEV_IRQMASK_1)
2090                 np->irqmask = NVREG_IRQMASK_WANTED_1;
2091         if (id->driver_data & DEV_IRQMASK_2)
2092                 np->irqmask = NVREG_IRQMASK_WANTED_2;
2093         if (id->driver_data & DEV_NEED_TIMERIRQ)
2094                 np->irqmask |= NVREG_IRQ_TIMER;
2095         if (id->driver_data & DEV_NEED_LINKTIMER) {
2096                 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
2097                 np->need_linktimer = 1;
2098                 np->link_timeout = jiffies + LINK_TIMEOUT;
2099         } else {
2100                 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
2101                 np->need_linktimer = 0;
2102         }
2103
2104         /* find a suitable phy */
2105         for (i = 1; i < 32; i++) {
2106                 int id1, id2;
2107
2108                 spin_lock_irq(&np->lock);
2109                 id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
2110                 spin_unlock_irq(&np->lock);
2111                 if (id1 < 0 || id1 == 0xffff)
2112                         continue;
2113                 spin_lock_irq(&np->lock);
2114                 id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
2115                 spin_unlock_irq(&np->lock);
2116                 if (id2 < 0 || id2 == 0xffff)
2117                         continue;
2118
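		/* Fold the OUI fragments from both ID registers into
		 * np->phy_oui for vendor-specific PHY handling. */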
2119                 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
2120                 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
2121                 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
2122                                 pci_name(pci_dev), id1, id2, i);
2123                 np->phyaddr = i;
2124                 np->phy_oui = id1 | id2;
2125                 break;
2126         }
2127         if (i == 32) {
2128                 /* PHY in isolate mode? No phy attached and user wants to
2129                  * test loopback? Very odd, but can be correct.
2130                  */
2131                 printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
2132                                 pci_name(pci_dev));
2133         }
2134
2135         if (i != 32) {
2136                 /* reset it */
2137                 phy_init(dev);
2138         }
2139
2140         /* set default link speed settings */
2141         np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2142         np->duplex = 0;
2143         np->autoneg = 1;
2144
2145         err = register_netdev(dev);
2146         if (err) {
2147                 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
2148                 goto out_freering;
2149         }
2150         printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
2151                         dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
2152                         pci_name(pci_dev));
2153
2154         return 0;
2155
2156 out_freering:
2157         pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
2158                                 np->rx_ring, np->ring_addr);
2159         pci_set_drvdata(pci_dev, NULL);
2160 out_unmap:
2161         iounmap(get_hwbase(dev));
2162 out_relreg:
2163         pci_release_regions(pci_dev);
2164 out_disable:
2165         pci_disable_device(pci_dev);
2166 out_free:
2167         free_netdev(dev);
2168 out:
2169         return err;
2170 }
2171
2172 static void __devexit nv_remove(struct pci_dev *pci_dev)
2173 {
2174         struct net_device *dev = pci_get_drvdata(pci_dev);
2175         struct fe_priv *np = get_nvpriv(dev);
2176         u8 __iomem *base = get_hwbase(dev);
2177
2178         unregister_netdev(dev);
2179
2180         /* special op: write back the misordered MAC address - otherwise
2181          * the next nv_probe would see a wrong address.
2182          */
2183         writel(np->orig_mac[0], base + NvRegMacAddrA);
2184         writel(np->orig_mac[1], base + NvRegMacAddrB);
2185
2186         /* free all structures */
2187         pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr);
2188         iounmap(get_hwbase(dev));
2189         pci_release_regions(pci_dev);
2190         pci_disable_device(pci_dev);
2191         free_netdev(dev);
2192         pci_set_drvdata(pci_dev, NULL);
2193 }
2194
2195 static struct pci_device_id pci_tbl[] = {
2196         {       /* nForce Ethernet Controller */
2197                 .vendor = PCI_VENDOR_ID_NVIDIA,
2198                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_1,
2199                 .subvendor = PCI_ANY_ID,
2200                 .subdevice = PCI_ANY_ID,
2201                 .driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2202         },
2203         {       /* nForce2 Ethernet Controller */
2204                 .vendor = PCI_VENDOR_ID_NVIDIA,
2205                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_2,
2206                 .subvendor = PCI_ANY_ID,
2207                 .subdevice = PCI_ANY_ID,
2208                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2209         },
2210         {       /* nForce3 Ethernet Controller */
2211                 .vendor = PCI_VENDOR_ID_NVIDIA,
2212                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_3,
2213                 .subvendor = PCI_ANY_ID,
2214                 .subdevice = PCI_ANY_ID,
2215                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2216         },
2217         {       /* nForce3 Ethernet Controller */
2218                 .vendor = PCI_VENDOR_ID_NVIDIA,
2219                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_4,
2220                 .subvendor = PCI_ANY_ID,
2221                 .subdevice = PCI_ANY_ID,
2222                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2223         },
2224         {       /* nForce3 Ethernet Controller */
2225                 .vendor = PCI_VENDOR_ID_NVIDIA,
2226                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_5,
2227                 .subvendor = PCI_ANY_ID,
2228                 .subdevice = PCI_ANY_ID,
2229                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2230         },
2231         {       /* nForce3 Ethernet Controller */
2232                 .vendor = PCI_VENDOR_ID_NVIDIA,
2233                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_6,
2234                 .subvendor = PCI_ANY_ID,
2235                 .subdevice = PCI_ANY_ID,
2236                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2237         },
2238         {       /* nForce3 Ethernet Controller */
2239                 .vendor = PCI_VENDOR_ID_NVIDIA,
2240                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_7,
2241                 .subvendor = PCI_ANY_ID,
2242                 .subdevice = PCI_ANY_ID,
2243                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2244         },
2245         {       /* CK804 Ethernet Controller */
2246                 .vendor = PCI_VENDOR_ID_NVIDIA,
2247                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_8,
2248                 .subvendor = PCI_ANY_ID,
2249                 .subdevice = PCI_ANY_ID,
2250                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2251         },
2252         {       /* CK804 Ethernet Controller */
2253                 .vendor = PCI_VENDOR_ID_NVIDIA,
2254                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_9,
2255                 .subvendor = PCI_ANY_ID,
2256                 .subdevice = PCI_ANY_ID,
2257                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2258         },
2259         {       /* MCP04 Ethernet Controller */
2260                 .vendor = PCI_VENDOR_ID_NVIDIA,
2261                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_10,
2262                 .subvendor = PCI_ANY_ID,
2263                 .subdevice = PCI_ANY_ID,
2264                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2265         },
2266         {       /* MCP04 Ethernet Controller */
2267                 .vendor = PCI_VENDOR_ID_NVIDIA,
2268                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_11,
2269                 .subvendor = PCI_ANY_ID,
2270                 .subdevice = PCI_ANY_ID,
2271                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2272         },
2273         {       /* MCP51 Ethernet Controller */
2274                 .vendor = PCI_VENDOR_ID_NVIDIA,
2275                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_12,
2276                 .subvendor = PCI_ANY_ID,
2277                 .subdevice = PCI_ANY_ID,
2278                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2279         },
2280         {       /* MCP51 Ethernet Controller */
2281                 .vendor = PCI_VENDOR_ID_NVIDIA,
2282                 .device = PCI_DEVICE_ID_NVIDIA_NVENET_13,
2283                 .subvendor = PCI_ANY_ID,
2284                 .subdevice = PCI_ANY_ID,
2285                 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2286         },
2287         {0,},
2288 };
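
/*
 * driver_data encodes per-chip quirks: DEV_NEED_LASTPACKET1 selects the
 * alternate tx flag layout, DEV_IRQMASK_1/2 choose the irq mask,
 * DEV_NEED_TIMERIRQ enables the timer interrupt, and DEV_NEED_LINKTIMER
 * arms the periodic link check in nv_nic_irq() for chips where the link
 * interrupt alone is not sufficient.
 */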
2289
2290 static struct pci_driver driver = {
2291         .name = "forcedeth",
2292         .id_table = pci_tbl,
2293         .probe = nv_probe,
2294         .remove = __devexit_p(nv_remove),
2295 };
2296
2297
2298 static int __init init_nic(void)
2299 {
2300         printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
2301         return pci_module_init(&driver);
2302 }
2303
2304 static void __exit exit_nic(void)
2305 {
2306         pci_unregister_driver(&driver);
2307 }
2308
2309 module_param(max_interrupt_work, int, 0);
2310 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
2311
2312 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
2313 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
2314 MODULE_LICENSE("GPL");
2315
2316 MODULE_DEVICE_TABLE(pci, pci_tbl);
2317
2318 module_init(init_nic);
2319 module_exit(exit_nic);