2 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
4 * Note: This driver is a cleanroom reimplementation based on reverse
5 * engineered documentation written by Carl-Daniel Hailfinger
6 * and Andrew de Quincey. It's neither supported nor endorsed
7 * by NVIDIA Corp. Use at your own risk.
9 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
10 * trademarks of NVIDIA Corporation in the United States and other countries.
13 * Copyright (C) 2003,4 Manfred Spraul
14 * Copyright (C) 2004 Andrew de Quincey (wol support)
15 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
16 * IRQ rate fixes, bigendian fixes, cleanups, verification)
17 * Copyright (c) 2004 NVIDIA Corporation
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
34 * 0.01: 05 Oct 2003: First release that compiles without warnings.
35 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
36 * Check all PCI BARs for the register window.
37 * udelay added to mii_rw.
38 * 0.03: 06 Oct 2003: Initialize dev->irq.
39 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
40 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
41 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
43 * 0.07: 14 Oct 2003: Further irq mask updates.
44 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
45 * added into irq handler, NULL check for drain_ring.
46 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
47 * requested interrupt sources.
48 * 0.10: 20 Oct 2003: First cleanup for release.
49 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
50 * MAC Address init fix, set_multicast cleanup.
51 * 0.12: 23 Oct 2003: Cleanups for release.
52 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
53 * Set link speed correctly. start rx before starting
54 * tx (nv_start_rx sets the link speed).
55 * 0.14: 25 Oct 2003: Nic dependent irq mask.
56 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
58 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
59 * increased to 1628 bytes.
60 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
62 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
63 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
64 * addresses, really stop rx if already running
65 * in nv_start_rx, clean up a bit.
66 * 0.20: 07 Dec 2003: alloc fixes
67 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
68 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
70 * 0.23: 26 Jan 2004: various small cleanups
71 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
72 * 0.25: 09 Mar 2004: wol support
73 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
74 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
75 * added CK804/MCP04 device IDs, code fixes
76 * for registers, link status and other minor fixes.
77 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
78 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
79 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
80 * into nv_close, otherwise reenabling for wol can
81 * cause DMA to kfree'd memory.
82 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
84 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
85 * 0.33: 16 May 2005: Support for MCP51 added.
86 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
87 * 0.35: 26 Jun 2005: Support for MCP55 added.
88 * 0.36: 28 Jun 2005: Add jumbo frame support.
89 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
90 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
92 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
93 * 0.40: 19 Jul 2005: Add support for mac address change.
94 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
96 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
97 * in the second (and later) nv_open call
98 * 0.43: 10 Aug 2005: Add support for tx checksum.
101 * We suspect that on some hardware no TX done interrupts are generated.
102 * This means recovery from netif_stop_queue only happens if the hw timer
103 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
104 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
105 * If your hardware reliably generates tx done interrupts, then you can remove
106 * DEV_NEED_TIMERIRQ from the driver_data flags.
107 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
108 * superfluous timer interrupts from the nic.
110 #define FORCEDETH_VERSION "0.43"
111 #define DRV_NAME "forcedeth"
113 #include <linux/module.h>
114 #include <linux/types.h>
115 #include <linux/pci.h>
116 #include <linux/interrupt.h>
117 #include <linux/netdevice.h>
118 #include <linux/etherdevice.h>
119 #include <linux/delay.h>
120 #include <linux/spinlock.h>
121 #include <linux/ethtool.h>
122 #include <linux/timer.h>
123 #include <linux/skbuff.h>
124 #include <linux/mii.h>
125 #include <linux/random.h>
126 #include <linux/init.h>
127 #include <linux/if_vlan.h>
131 #include <asm/uaccess.h>
132 #include <asm/system.h>
135 #define dprintk printk
137 #define dprintk(x...) do { } while (0)
145 #define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
146 #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
147 #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
148 #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
149 #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
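/* These flags are OR'd together into the PCI device table's driver_data
 * (see the DEV_NEED_TIMERIRQ note above) and tested when the device is set
 * up. A hypothetical sketch of how such a flag might be consumed at probe
 * time (the probe code itself is not shown here):
 *
 * if (id->driver_data & DEV_HAS_CHECKSUM)
 * np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
 */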
152 NvRegIrqStatus = 0x000,
153 #define NVREG_IRQSTAT_MIIEVENT 0x040
154 #define NVREG_IRQSTAT_MASK 0x1ff
155 NvRegIrqMask = 0x004,
156 #define NVREG_IRQ_RX_ERROR 0x0001
157 #define NVREG_IRQ_RX 0x0002
158 #define NVREG_IRQ_RX_NOBUF 0x0004
159 #define NVREG_IRQ_TX_ERR 0x0008
160 #define NVREG_IRQ_TX_OK 0x0010
161 #define NVREG_IRQ_TIMER 0x0020
162 #define NVREG_IRQ_LINK 0x0040
163 #define NVREG_IRQ_TX_ERROR 0x0080
164 #define NVREG_IRQ_TX1 0x0100
165 #define NVREG_IRQMASK_WANTED 0x00df
167 #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
168 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
171 NvRegUnknownSetupReg6 = 0x008,
172 #define NVREG_UNKSETUP6_VAL 3
175 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
176 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
178 NvRegPollingInterval = 0x00c,
179 #define NVREG_POLL_DEFAULT 970
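/* With the scaling given above (97 ~ 1 ms), NVREG_POLL_DEFAULT=970 yields an
 * interval of roughly 10 ms, i.e. about 100 timer interrupts per second,
 * matching the DEV_NEED_TIMERIRQ description at the top of the file.
 */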
181 #define NVREG_MISC1_HD 0x02
182 #define NVREG_MISC1_FORCE 0x3b0f3c
184 NvRegTransmitterControl = 0x084,
185 #define NVREG_XMITCTL_START 0x01
186 NvRegTransmitterStatus = 0x088,
187 #define NVREG_XMITSTAT_BUSY 0x01
189 NvRegPacketFilterFlags = 0x8c,
190 #define NVREG_PFF_ALWAYS 0x7F0008
191 #define NVREG_PFF_PROMISC 0x80
192 #define NVREG_PFF_MYADDR 0x20
194 NvRegOffloadConfig = 0x90,
195 #define NVREG_OFFLOAD_HOMEPHY 0x601
196 #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
197 NvRegReceiverControl = 0x094,
198 #define NVREG_RCVCTL_START 0x01
199 NvRegReceiverStatus = 0x98,
200 #define NVREG_RCVSTAT_BUSY 0x01
202 NvRegRandomSeed = 0x9c,
203 #define NVREG_RNDSEED_MASK 0x00ff
204 #define NVREG_RNDSEED_FORCE 0x7f00
205 #define NVREG_RNDSEED_FORCE2 0x2d00
206 #define NVREG_RNDSEED_FORCE3 0x7400
208 NvRegUnknownSetupReg1 = 0xA0,
209 #define NVREG_UNKSETUP1_VAL 0x16070f
210 NvRegUnknownSetupReg2 = 0xA4,
211 #define NVREG_UNKSETUP2_VAL 0x16
212 NvRegMacAddrA = 0xA8,
213 NvRegMacAddrB = 0xAC,
214 NvRegMulticastAddrA = 0xB0,
215 #define NVREG_MCASTADDRA_FORCE 0x01
216 NvRegMulticastAddrB = 0xB4,
217 NvRegMulticastMaskA = 0xB8,
218 NvRegMulticastMaskB = 0xBC,
220 NvRegPhyInterface = 0xC0,
221 #define PHY_RGMII 0x10000000
223 NvRegTxRingPhysAddr = 0x100,
224 NvRegRxRingPhysAddr = 0x104,
225 NvRegRingSizes = 0x108,
226 #define NVREG_RINGSZ_TXSHIFT 0
227 #define NVREG_RINGSZ_RXSHIFT 16
228 NvRegUnknownTransmitterReg = 0x10c,
229 NvRegLinkSpeed = 0x110,
230 #define NVREG_LINKSPEED_FORCE 0x10000
231 #define NVREG_LINKSPEED_10 1000
232 #define NVREG_LINKSPEED_100 100
233 #define NVREG_LINKSPEED_1000 50
234 #define NVREG_LINKSPEED_MASK (0xFFF)
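/* Note: the NVREG_LINKSPEED_* values are raw register codes written to
 * NvRegLinkSpeed (see nv_start_rx and nv_update_linkspeed), not Mbit/s
 * figures: 10 MBit is encoded as 1000, 100 MBit as 100 and 1000 MBit as 50.
 */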
235 NvRegUnknownSetupReg5 = 0x130,
236 #define NVREG_UNKSETUP5_BIT31 (1<<31)
237 NvRegUnknownSetupReg3 = 0x13c,
238 #define NVREG_UNKSETUP3_VAL1 0x200010
239 NvRegTxRxControl = 0x144,
240 #define NVREG_TXRXCTL_KICK 0x0001
241 #define NVREG_TXRXCTL_BIT1 0x0002
242 #define NVREG_TXRXCTL_BIT2 0x0004
243 #define NVREG_TXRXCTL_IDLE 0x0008
244 #define NVREG_TXRXCTL_RESET 0x0010
245 #define NVREG_TXRXCTL_RXCHECK 0x0400
246 #define NVREG_TXRXCTL_DESC_1 0
247 #define NVREG_TXRXCTL_DESC_2 0x02100
248 #define NVREG_TXRXCTL_DESC_3 0x02200
249 NvRegMIIStatus = 0x180,
250 #define NVREG_MIISTAT_ERROR 0x0001
251 #define NVREG_MIISTAT_LINKCHANGE 0x0008
252 #define NVREG_MIISTAT_MASK 0x000f
253 #define NVREG_MIISTAT_MASK2 0x000f
254 NvRegUnknownSetupReg4 = 0x184,
255 #define NVREG_UNKSETUP4_VAL 8
257 NvRegAdapterControl = 0x188,
258 #define NVREG_ADAPTCTL_START 0x02
259 #define NVREG_ADAPTCTL_LINKUP 0x04
260 #define NVREG_ADAPTCTL_PHYVALID 0x40000
261 #define NVREG_ADAPTCTL_RUNNING 0x100000
262 #define NVREG_ADAPTCTL_PHYSHIFT 24
263 NvRegMIISpeed = 0x18c,
264 #define NVREG_MIISPEED_BIT8 (1<<8)
265 #define NVREG_MIIDELAY 5
266 NvRegMIIControl = 0x190,
267 #define NVREG_MIICTL_INUSE 0x08000
268 #define NVREG_MIICTL_WRITE 0x00400
269 #define NVREG_MIICTL_ADDRSHIFT 5
270 NvRegMIIData = 0x194,
271 NvRegWakeUpFlags = 0x200,
272 #define NVREG_WAKEUPFLAGS_VAL 0x7770
273 #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
274 #define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
275 #define NVREG_WAKEUPFLAGS_D3SHIFT 12
276 #define NVREG_WAKEUPFLAGS_D2SHIFT 8
277 #define NVREG_WAKEUPFLAGS_D1SHIFT 4
278 #define NVREG_WAKEUPFLAGS_D0SHIFT 0
279 #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
280 #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
281 #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
282 #define NVREG_WAKEUPFLAGS_ENABLE 0x1111
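/* The shifts above suggest one 4-bit field per power state (D0..D3) plus
 * enable/busy fields; NVREG_WAKEUPFLAGS_ENABLE = 0x1111 would then set the
 * low bit of each D-state nibble. This is an interpretation of the reverse
 * engineered defines, not documented hardware behaviour.
 */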
284 NvRegPatternCRC = 0x204,
285 NvRegPatternMask = 0x208,
286 NvRegPowerCap = 0x268,
287 #define NVREG_POWERCAP_D3SUPP (1<<30)
288 #define NVREG_POWERCAP_D2SUPP (1<<26)
289 #define NVREG_POWERCAP_D1SUPP (1<<25)
290 NvRegPowerState = 0x26c,
291 #define NVREG_POWERSTATE_POWEREDUP 0x8000
292 #define NVREG_POWERSTATE_VALID 0x0100
293 #define NVREG_POWERSTATE_MASK 0x0003
294 #define NVREG_POWERSTATE_D0 0x0000
295 #define NVREG_POWERSTATE_D1 0x0001
296 #define NVREG_POWERSTATE_D2 0x0002
297 #define NVREG_POWERSTATE_D3 0x0003
300 /* Big endian: should work, but is untested */
306 struct ring_desc_ex {
307 u32 PacketBufferHigh;
313 typedef union _ring_type {
314 struct ring_desc* orig;
315 struct ring_desc_ex* ex;
318 #define FLAG_MASK_V1 0xffff0000
319 #define FLAG_MASK_V2 0xffffc000
320 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
321 #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
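/* Each descriptor's FlagLen word packs the buffer length in the low bits and
 * the status/command flags (NV_TX_* / NV_RX_* below) in the high bits.
 * LEN_MASK_V1 applies to DESC_VER_1 descriptors, LEN_MASK_V2 to the newer
 * formats; see nv_descr_getlength() and nv_descr_getlength_ex().
 */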
323 #define NV_TX_LASTPACKET (1<<16)
324 #define NV_TX_RETRYERROR (1<<19)
325 #define NV_TX_FORCED_INTERRUPT (1<<24)
326 #define NV_TX_DEFERRED (1<<26)
327 #define NV_TX_CARRIERLOST (1<<27)
328 #define NV_TX_LATECOLLISION (1<<28)
329 #define NV_TX_UNDERFLOW (1<<29)
330 #define NV_TX_ERROR (1<<30)
331 #define NV_TX_VALID (1<<31)
333 #define NV_TX2_LASTPACKET (1<<29)
334 #define NV_TX2_RETRYERROR (1<<18)
335 #define NV_TX2_FORCED_INTERRUPT (1<<30)
336 #define NV_TX2_DEFERRED (1<<25)
337 #define NV_TX2_CARRIERLOST (1<<26)
338 #define NV_TX2_LATECOLLISION (1<<27)
339 #define NV_TX2_UNDERFLOW (1<<28)
340 /* error and valid are the same for both */
341 #define NV_TX2_ERROR (1<<30)
342 #define NV_TX2_VALID (1<<31)
343 #define NV_TX2_CHECKSUM_L3 (1<<27)
344 #define NV_TX2_CHECKSUM_L4 (1<<26)
346 #define NV_RX_DESCRIPTORVALID (1<<16)
347 #define NV_RX_MISSEDFRAME (1<<17)
348 #define NV_RX_SUBSTRACT1 (1<<18)
349 #define NV_RX_ERROR1 (1<<23)
350 #define NV_RX_ERROR2 (1<<24)
351 #define NV_RX_ERROR3 (1<<25)
352 #define NV_RX_ERROR4 (1<<26)
353 #define NV_RX_CRCERR (1<<27)
354 #define NV_RX_OVERFLOW (1<<28)
355 #define NV_RX_FRAMINGERR (1<<29)
356 #define NV_RX_ERROR (1<<30)
357 #define NV_RX_AVAIL (1<<31)
359 #define NV_RX2_CHECKSUMMASK (0x1C000000)
360 #define NV_RX2_CHECKSUMOK1 (0x10000000)
361 #define NV_RX2_CHECKSUMOK2 (0x14000000)
362 #define NV_RX2_CHECKSUMOK3 (0x18000000)
363 #define NV_RX2_DESCRIPTORVALID (1<<29)
364 #define NV_RX2_SUBSTRACT1 (1<<25)
365 #define NV_RX2_ERROR1 (1<<18)
366 #define NV_RX2_ERROR2 (1<<19)
367 #define NV_RX2_ERROR3 (1<<20)
368 #define NV_RX2_ERROR4 (1<<21)
369 #define NV_RX2_CRCERR (1<<22)
370 #define NV_RX2_OVERFLOW (1<<23)
371 #define NV_RX2_FRAMINGERR (1<<24)
372 /* error and avail are the same for both */
373 #define NV_RX2_ERROR (1<<30)
374 #define NV_RX2_AVAIL (1<<31)
376 /* Miscellaneous hardware-related defines: */
377 #define NV_PCI_REGSZ 0x270
379 /* various timeout delays: all in usec */
380 #define NV_TXRX_RESET_DELAY 4
381 #define NV_TXSTOP_DELAY1 10
382 #define NV_TXSTOP_DELAY1MAX 500000
383 #define NV_TXSTOP_DELAY2 100
384 #define NV_RXSTOP_DELAY1 10
385 #define NV_RXSTOP_DELAY1MAX 500000
386 #define NV_RXSTOP_DELAY2 100
387 #define NV_SETUP5_DELAY 5
388 #define NV_SETUP5_DELAYMAX 50000
389 #define NV_POWERUP_DELAY 5
390 #define NV_POWERUP_DELAYMAX 5000
391 #define NV_MIIBUSY_DELAY 50
392 #define NV_MIIPHY_DELAY 10
393 #define NV_MIIPHY_DELAYMAX 10000
395 #define NV_WAKEUPPATTERNS 5
396 #define NV_WAKEUPMASKENTRIES 4
398 /* General driver defaults */
399 #define NV_WATCHDOG_TIMEO (5*HZ)
404 * If your nic mysteriously hangs then try to reduce the limits
405 * to 1/0: It might be required to set NV_TX_LASTPACKET in the
406 * last valid ring entry. But this would be impossible to
407 * implement - probably a disassembly error.
409 #define TX_LIMIT_STOP 63
410 #define TX_LIMIT_START 62
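/* nv_start_xmit() stops the queue once next_tx - nic_tx reaches
 * TX_LIMIT_STOP outstanding packets, and nv_tx_done() wakes it again when
 * the backlog drops below TX_LIMIT_START.
 */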
412 /* rx/tx mac addr + type + vlan + align + slack*/
413 #define NV_RX_HEADERS (64)
414 /* even more slack. */
415 #define NV_RX_ALLOC_PAD (64)
417 /* maximum mtu size */
418 #define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
419 #define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
421 #define OOM_REFILL (1+HZ/20)
422 #define POLL_WAIT (1+HZ/100)
423 #define LINK_TIMEOUT (3*HZ)
427 * The nic supports three different descriptor types:
428 * - DESC_VER_1: Original
429 * - DESC_VER_2: support for jumbo frames.
430 * - DESC_VER_3: 64-bit format.
437 #define PHY_OUI_MARVELL 0x5043
438 #define PHY_OUI_CICADA 0x03f1
439 #define PHYID1_OUI_MASK 0x03ff
440 #define PHYID1_OUI_SHFT 6
441 #define PHYID2_OUI_MASK 0xfc00
442 #define PHYID2_OUI_SHFT 10
443 #define PHY_INIT1 0x0f000
444 #define PHY_INIT2 0x0e00
445 #define PHY_INIT3 0x01000
446 #define PHY_INIT4 0x0200
447 #define PHY_INIT5 0x0004
448 #define PHY_INIT6 0x02000
449 #define PHY_GIGABIT 0x0100
451 #define PHY_TIMEOUT 0x1
452 #define PHY_ERROR 0x2
456 #define PHY_HALF 0x100
458 /* FIXME: MII defines that should be added to <linux/mii.h> */
459 #define MII_1000BT_CR 0x09
460 #define MII_1000BT_SR 0x0a
461 #define ADVERTISE_1000FULL 0x0200
462 #define ADVERTISE_1000HALF 0x0100
463 #define LPA_1000FULL 0x0800
464 #define LPA_1000HALF 0x0400
469 * All hardware access under dev->priv->lock, except the performance-critical parts:
471 * - rx is (pseudo-) lockless: it relies on the single-threading provided
472 * by the arch code for interrupts.
473 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
474 * needs dev->priv->lock :-(
475 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
478 /* in dev: base, irq */
483 * Locking: spin_lock(&np->lock); */
484 struct net_device_stats stats;
492 unsigned int phy_oui;
495 /* General data: RO fields */
496 dma_addr_t ring_addr;
497 struct pci_dev *pci_dev;
505 /* rx specific fields.
506 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
509 unsigned int cur_rx, refill_rx;
510 struct sk_buff *rx_skbuff[RX_RING];
511 dma_addr_t rx_dma[RX_RING];
512 unsigned int rx_buf_sz;
513 unsigned int pkt_limit;
514 struct timer_list oom_kick;
515 struct timer_list nic_poll;
517 /* media detection workaround.
518 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
521 unsigned long link_timeout;
523 * tx specific fields.
526 unsigned int next_tx, nic_tx;
527 struct sk_buff *tx_skbuff[TX_RING];
528 dma_addr_t tx_dma[TX_RING];
533 * Maximum number of loops until we assume that a bit in the irq mask
534 * is stuck. Overridable with module param.
536 static int max_interrupt_work = 5;
538 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
540 return netdev_priv(dev);
543 static inline u8 __iomem *get_hwbase(struct net_device *dev)
545 return get_nvpriv(dev)->base;
548 static inline void pci_push(u8 __iomem *base)
550 /* force out pending posted writes */
554 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
556 return le32_to_cpu(prd->FlagLen)
557 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
560 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
562 return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
565 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
566 int delay, int delaymax, const char *msg)
568 u8 __iomem *base = get_hwbase(dev);
579 } while ((readl(base + offset) & mask) != target);
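/* reg_delay: poll a register until (value & mask) == target, waiting 'delay'
 * usec (udelay) between reads; on timeout (after 'delaymax' usec) it prints
 * 'msg' if non-NULL and returns nonzero, as the callers below expect.
 */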
583 #define MII_READ (-1)
584 /* mii_rw: read/write a register on the PHY.
586 * Caller must guarantee serialization
588 static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
590 u8 __iomem *base = get_hwbase(dev);
594 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
596 reg = readl(base + NvRegMIIControl);
597 if (reg & NVREG_MIICTL_INUSE) {
598 writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
599 udelay(NV_MIIBUSY_DELAY);
602 reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
603 if (value != MII_READ) {
604 writel(value, base + NvRegMIIData);
605 reg |= NVREG_MIICTL_WRITE;
607 writel(reg, base + NvRegMIIControl);
609 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
610 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
611 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
612 dev->name, miireg, addr);
614 } else if (value != MII_READ) {
615 /* it was a write operation - fewer failures are detectable */
616 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
617 dev->name, value, miireg, addr);
619 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
620 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
621 dev->name, miireg, addr);
624 retval = readl(base + NvRegMIIData);
625 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
626 dev->name, miireg, addr, retval);
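/* Typical usage, as in the callers below: a read is requested by passing
 * MII_READ as the value and returns the register contents, e.g.
 *
 * bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 *
 * while a write passes the new value and returns 0 on success, nonzero on
 * timeout.
 */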
632 static int phy_reset(struct net_device *dev)
634 struct fe_priv *np = get_nvpriv(dev);
636 unsigned int tries = 0;
638 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
639 miicontrol |= BMCR_RESET;
640 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
647 /* must wait till reset is deasserted */
648 while (miicontrol & BMCR_RESET) {
650 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
651 /* FIXME: 100 tries seem excessive */
658 static int phy_init(struct net_device *dev)
660 struct fe_priv *np = get_nvpriv(dev);
661 u8 __iomem *base = get_hwbase(dev);
662 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
664 /* set advertise register */
665 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
666 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
667 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
668 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
672 /* get phy interface type */
673 phyinterface = readl(base + NvRegPhyInterface);
675 /* see if gigabit phy */
676 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
677 if (mii_status & PHY_GIGABIT) {
678 np->gigabit = PHY_GIGABIT;
679 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
680 mii_control_1000 &= ~ADVERTISE_1000HALF;
681 if (phyinterface & PHY_RGMII)
682 mii_control_1000 |= ADVERTISE_1000FULL;
684 mii_control_1000 &= ~ADVERTISE_1000FULL;
686 if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
687 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
695 if (phy_reset(dev)) {
696 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
700 /* phy vendor specific configuration */
701 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
702 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
703 phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
704 phy_reserved |= (PHY_INIT3 | PHY_INIT4);
705 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
706 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
709 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
710 phy_reserved |= PHY_INIT5;
711 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
712 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
716 if (np->phy_oui == PHY_OUI_CICADA) {
717 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
718 phy_reserved |= PHY_INIT6;
719 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
720 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
725 /* restart auto negotiation */
726 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
727 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
728 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
735 static void nv_start_rx(struct net_device *dev)
737 struct fe_priv *np = get_nvpriv(dev);
738 u8 __iomem *base = get_hwbase(dev);
740 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
741 /* Already running? Stop it. */
742 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
743 writel(0, base + NvRegReceiverControl);
746 writel(np->linkspeed, base + NvRegLinkSpeed);
748 writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
749 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
750 dev->name, np->duplex, np->linkspeed);
754 static void nv_stop_rx(struct net_device *dev)
756 u8 __iomem *base = get_hwbase(dev);
758 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
759 writel(0, base + NvRegReceiverControl);
760 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
761 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
762 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
764 udelay(NV_RXSTOP_DELAY2);
765 writel(0, base + NvRegLinkSpeed);
768 static void nv_start_tx(struct net_device *dev)
770 u8 __iomem *base = get_hwbase(dev);
772 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
773 writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
777 static void nv_stop_tx(struct net_device *dev)
779 u8 __iomem *base = get_hwbase(dev);
781 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
782 writel(0, base + NvRegTransmitterControl);
783 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
784 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
785 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
787 udelay(NV_TXSTOP_DELAY2);
788 writel(0, base + NvRegUnknownTransmitterReg);
791 static void nv_txrx_reset(struct net_device *dev)
793 struct fe_priv *np = get_nvpriv(dev);
794 u8 __iomem *base = get_hwbase(dev);
796 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
797 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
799 udelay(NV_TXRX_RESET_DELAY);
800 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
805 * nv_get_stats: dev->get_stats function
806 * Get latest stats value from the nic.
807 * Called with read_lock(&dev_base_lock) held for read -
808 * only synchronized against unregister_netdevice.
810 static struct net_device_stats *nv_get_stats(struct net_device *dev)
812 struct fe_priv *np = get_nvpriv(dev);
814 /* It seems that the nic always generates interrupts and doesn't
815 * accumulate errors internally. Thus the current values in np->stats
816 * are already up to date.
822 * nv_alloc_rx: fill rx ring entries.
823 * Return 1 if the allocations for the skbs failed and the
824 * rx engine is without Available descriptors
826 static int nv_alloc_rx(struct net_device *dev)
828 struct fe_priv *np = get_nvpriv(dev);
829 unsigned int refill_rx = np->refill_rx;
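/* cur_rx and refill_rx are free-running counters; the ring slot is the
 * counter modulo RX_RING, so cur_rx - refill_rx is the number of entries
 * that still need a fresh skb.
 */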
832 while (np->cur_rx != refill_rx) {
835 nr = refill_rx % RX_RING;
836 if (np->rx_skbuff[nr] == NULL) {
838 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
843 np->rx_skbuff[nr] = skb;
845 skb = np->rx_skbuff[nr];
847 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
849 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
850 np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
852 np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
854 np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
855 np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
857 np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
859 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
860 dev->name, refill_rx);
863 np->refill_rx = refill_rx;
864 if (np->cur_rx - refill_rx == RX_RING)
869 static void nv_do_rx_refill(unsigned long data)
871 struct net_device *dev = (struct net_device *) data;
872 struct fe_priv *np = get_nvpriv(dev);
874 disable_irq(dev->irq);
875 if (nv_alloc_rx(dev)) {
876 spin_lock(&np->lock);
877 if (!np->in_shutdown)
878 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
879 spin_unlock(&np->lock);
881 enable_irq(dev->irq);
884 static void nv_init_rx(struct net_device *dev)
886 struct fe_priv *np = get_nvpriv(dev);
889 np->cur_rx = RX_RING;
891 for (i = 0; i < RX_RING; i++)
892 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
893 np->rx_ring.orig[i].FlagLen = 0;
895 np->rx_ring.ex[i].FlagLen = 0;
898 static void nv_init_tx(struct net_device *dev)
900 struct fe_priv *np = get_nvpriv(dev);
903 np->next_tx = np->nic_tx = 0;
904 for (i = 0; i < TX_RING; i++)
905 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
906 np->tx_ring.orig[i].FlagLen = 0;
908 np->tx_ring.ex[i].FlagLen = 0;
911 static int nv_init_ring(struct net_device *dev)
915 return nv_alloc_rx(dev);
918 static void nv_drain_tx(struct net_device *dev)
920 struct fe_priv *np = get_nvpriv(dev);
922 for (i = 0; i < TX_RING; i++) {
923 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
924 np->tx_ring.orig[i].FlagLen = 0;
926 np->tx_ring.ex[i].FlagLen = 0;
927 if (np->tx_skbuff[i]) {
928 pci_unmap_single(np->pci_dev, np->tx_dma[i],
929 np->tx_skbuff[i]->len,
931 dev_kfree_skb(np->tx_skbuff[i]);
932 np->tx_skbuff[i] = NULL;
933 np->stats.tx_dropped++;
938 static void nv_drain_rx(struct net_device *dev)
940 struct fe_priv *np = get_nvpriv(dev);
942 for (i = 0; i < RX_RING; i++) {
943 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
944 np->rx_ring.orig[i].FlagLen = 0;
946 np->rx_ring.ex[i].FlagLen = 0;
948 if (np->rx_skbuff[i]) {
949 pci_unmap_single(np->pci_dev, np->rx_dma[i],
950 np->rx_skbuff[i]->len,
952 dev_kfree_skb(np->rx_skbuff[i]);
953 np->rx_skbuff[i] = NULL;
958 static void drain_ring(struct net_device *dev)
965 * nv_start_xmit: dev->hard_start_xmit function
966 * Called with dev->xmit_lock held.
968 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
970 struct fe_priv *np = get_nvpriv(dev);
971 int nr = np->next_tx % TX_RING;
972 u32 tx_checksum = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
974 np->tx_skbuff[nr] = skb;
975 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
978 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
979 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
981 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
982 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
985 spin_lock_irq(&np->lock);
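/* The buffer address was set up above; FlagLen is written last, under
 * np->lock, since the length word together with np->tx_flags (which
 * presumably carries the NV_TX_VALID/NV_TX2_VALID ownership bit) hands the
 * descriptor over to the hardware.
 */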
987 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
988 np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags | tx_checksum);
990 np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags | tx_checksum);
991 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission\n",
992 dev->name, np->next_tx);
995 for (j=0; j<64; j++) {
997 dprintk("\n%03x:", j);
998 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1005 dev->trans_start = jiffies;
1006 if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
1007 netif_stop_queue(dev);
1008 spin_unlock_irq(&np->lock);
1009 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1010 pci_push(get_hwbase(dev));
1015 * nv_tx_done: check for completed packets, release the skbs.
1017 * Caller must own np->lock.
1019 static void nv_tx_done(struct net_device *dev)
1021 struct fe_priv *np = get_nvpriv(dev);
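/* Walk the ring from nic_tx towards next_tx and stop at the first descriptor
 * the hardware still owns (NV_TX_VALID set); for every completed descriptor
 * update the stats, unmap the buffer and free the skb.
 */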
1025 while (np->nic_tx != np->next_tx) {
1026 i = np->nic_tx % TX_RING;
1028 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1029 Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
1031 Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
1033 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
1034 dev->name, np->nic_tx, Flags);
1035 if (Flags & NV_TX_VALID)
1037 if (np->desc_ver == DESC_VER_1) {
1038 if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
1039 NV_TX_UNDERFLOW|NV_TX_ERROR)) {
1040 if (Flags & NV_TX_UNDERFLOW)
1041 np->stats.tx_fifo_errors++;
1042 if (Flags & NV_TX_CARRIERLOST)
1043 np->stats.tx_carrier_errors++;
1044 np->stats.tx_errors++;
1046 np->stats.tx_packets++;
1047 np->stats.tx_bytes += np->tx_skbuff[i]->len;
1050 if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1051 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1052 if (Flags & NV_TX2_UNDERFLOW)
1053 np->stats.tx_fifo_errors++;
1054 if (Flags & NV_TX2_CARRIERLOST)
1055 np->stats.tx_carrier_errors++;
1056 np->stats.tx_errors++;
1058 np->stats.tx_packets++;
1059 np->stats.tx_bytes += np->tx_skbuff[i]->len;
1062 pci_unmap_single(np->pci_dev, np->tx_dma[i],
1063 np->tx_skbuff[i]->len,
1065 dev_kfree_skb_irq(np->tx_skbuff[i]);
1066 np->tx_skbuff[i] = NULL;
1069 if (np->next_tx - np->nic_tx < TX_LIMIT_START)
1070 netif_wake_queue(dev);
1074 * nv_tx_timeout: dev->tx_timeout function
1075 * Called with dev->xmit_lock held.
1077 static void nv_tx_timeout(struct net_device *dev)
1079 struct fe_priv *np = get_nvpriv(dev);
1080 u8 __iomem *base = get_hwbase(dev);
1082 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
1083 readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
1088 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
1089 dev->name, (unsigned long)np->ring_addr,
1090 np->next_tx, np->nic_tx);
1091 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1092 for (i=0;i<0x400;i+= 32) {
1093 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
1095 readl(base + i + 0), readl(base + i + 4),
1096 readl(base + i + 8), readl(base + i + 12),
1097 readl(base + i + 16), readl(base + i + 20),
1098 readl(base + i + 24), readl(base + i + 28));
1100 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
1101 for (i=0;i<TX_RING;i+= 4) {
1102 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1103 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
1105 le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
1106 le32_to_cpu(np->tx_ring.orig[i].FlagLen),
1107 le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
1108 le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
1109 le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
1110 le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
1111 le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
1112 le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
1114 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
1116 le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
1117 le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
1118 le32_to_cpu(np->tx_ring.ex[i].FlagLen),
1119 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
1120 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
1121 le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
1122 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
1123 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
1124 le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
1125 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
1126 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
1127 le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
1132 spin_lock_irq(&np->lock);
1134 /* 1) stop tx engine */
1137 /* 2) check that the packets were not sent already: */
1140 /* 3) if there are dead entries: clear everything */
1141 if (np->next_tx != np->nic_tx) {
1142 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1144 np->next_tx = np->nic_tx = 0;
1145 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1146 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1148 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1149 netif_wake_queue(dev);
1152 /* 4) restart tx engine */
1154 spin_unlock_irq(&np->lock);
1158 * Called when the nic notices a mismatch between the actual data len on the
1159 * wire and the len indicated in the 802 header
1161 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
1163 int hdrlen; /* length of the 802 header */
1164 int protolen; /* length as stored in the proto field */
1166 /* 1) calculate len according to header */
1167 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
1168 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
1171 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
1174 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
1175 dev->name, datalen, protolen, hdrlen);
1176 if (protolen > ETH_DATA_LEN)
1177 return datalen; /* Value in proto field not a len, no checks possible */
1180 /* consistency checks: */
1181 if (datalen > ETH_ZLEN) {
1182 if (datalen >= protolen) {
1183 /* more data on wire than in 802 header, trim off the extra data. */
1186 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
1187 dev->name, protolen);
1190 /* less data on wire than mentioned in header.
1191 * Discard the packet.
1193 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
1198 /* short packet. Accept only if 802 values are also short */
1199 if (protolen > ETH_ZLEN) {
1200 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
1204 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
1205 dev->name, datalen);
1210 static void nv_rx_process(struct net_device *dev)
1212 struct fe_priv *np = get_nvpriv(dev);
1216 struct sk_buff *skb;
1219 if (np->cur_rx - np->refill_rx >= RX_RING)
1220 break; /* we scanned the whole ring - do not continue */
1222 i = np->cur_rx % RX_RING;
1223 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1224 Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
1225 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
1227 Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
1228 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1231 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
1232 dev->name, np->cur_rx, Flags);
1234 if (Flags & NV_RX_AVAIL)
1235 break; /* still owned by hardware, */
1238 * the packet is for us - immediately tear down the pci mapping.
1239 * TODO: check if a prefetch of the first cacheline improves performance.
1242 pci_unmap_single(np->pci_dev, np->rx_dma[i],
1243 np->rx_skbuff[i]->len,
1244 PCI_DMA_FROMDEVICE);
1248 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
1249 for (j=0; j<64; j++) {
1251 dprintk("\n%03x:", j);
1252 dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
1256 /* look at what we actually got: */
1257 if (np->desc_ver == DESC_VER_1) {
1258 if (!(Flags & NV_RX_DESCRIPTORVALID))
1261 if (Flags & NV_RX_MISSEDFRAME) {
1262 np->stats.rx_missed_errors++;
1263 np->stats.rx_errors++;
1266 if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
1267 np->stats.rx_errors++;
1270 if (Flags & NV_RX_CRCERR) {
1271 np->stats.rx_crc_errors++;
1272 np->stats.rx_errors++;
1275 if (Flags & NV_RX_OVERFLOW) {
1276 np->stats.rx_over_errors++;
1277 np->stats.rx_errors++;
1280 if (Flags & NV_RX_ERROR4) {
1281 len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
1283 np->stats.rx_errors++;
1287 /* framing errors are soft errors. */
1288 if (Flags & NV_RX_FRAMINGERR) {
1289 if (Flags & NV_RX_SUBSTRACT1) {
1294 if (!(Flags & NV_RX2_DESCRIPTORVALID))
1297 if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
1298 np->stats.rx_errors++;
1301 if (Flags & NV_RX2_CRCERR) {
1302 np->stats.rx_crc_errors++;
1303 np->stats.rx_errors++;
1306 if (Flags & NV_RX2_OVERFLOW) {
1307 np->stats.rx_over_errors++;
1308 np->stats.rx_errors++;
1311 if (Flags & NV_RX2_ERROR4) {
1312 len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
1314 np->stats.rx_errors++;
1318 /* framing errors are soft errors */
1319 if (Flags & NV_RX2_FRAMINGERR) {
1320 if (Flags & NV_RX2_SUBSTRACT1) {
1324 Flags &= NV_RX2_CHECKSUMMASK;
1325 if (Flags == NV_RX2_CHECKSUMOK1 ||
1326 Flags == NV_RX2_CHECKSUMOK2 ||
1327 Flags == NV_RX2_CHECKSUMOK3) {
1328 dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
1329 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
1331 dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
1334 /* got a valid packet - forward it to the network core */
1335 skb = np->rx_skbuff[i];
1336 np->rx_skbuff[i] = NULL;
1339 skb->protocol = eth_type_trans(skb, dev);
1340 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
1341 dev->name, np->cur_rx, len, skb->protocol);
1343 dev->last_rx = jiffies;
1344 np->stats.rx_packets++;
1345 np->stats.rx_bytes += len;
1351 static void set_bufsize(struct net_device *dev)
1353 struct fe_priv *np = netdev_priv(dev);
1355 if (dev->mtu <= ETH_DATA_LEN)
1356 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
1358 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
1362 * nv_change_mtu: dev->change_mtu function
1363 * Called with dev_base_lock held for read.
1365 static int nv_change_mtu(struct net_device *dev, int new_mtu)
1367 struct fe_priv *np = get_nvpriv(dev);
1370 if (new_mtu < 64 || new_mtu > np->pkt_limit)
1376 /* return early if the buffer sizes will not change */
1377 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1379 if (old_mtu == new_mtu)
1382 /* synchronized against open : rtnl_lock() held by caller */
1383 if (netif_running(dev)) {
1384 u8 __iomem *base = get_hwbase(dev);
1386 * It seems that the nic preloads valid ring entries into an
1387 * internal buffer. The procedure for flushing everything is
1388 * guessed; there is probably a simpler approach.
1389 * Changing the MTU is a rare event, so it shouldn't matter.
1391 disable_irq(dev->irq);
1392 spin_lock_bh(&dev->xmit_lock);
1393 spin_lock(&np->lock);
1398 /* drain rx queue */
1401 /* reinit driver view of the rx queue */
1404 /* alloc new rx buffers */
1406 if (nv_alloc_rx(dev)) {
1407 if (!np->in_shutdown)
1408 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1410 /* reinit nic view of the rx queue */
1411 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1412 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1413 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1414 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1416 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1417 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1418 base + NvRegRingSizes);
1420 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1423 /* restart rx engine */
1426 spin_unlock(&np->lock);
1427 spin_unlock_bh(&dev->xmit_lock);
1428 enable_irq(dev->irq);
1433 static void nv_copy_mac_to_hw(struct net_device *dev)
1435 u8 __iomem *base = get_hwbase(dev);
1438 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1439 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1440 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1442 writel(mac[0], base + NvRegMacAddrA);
1443 writel(mac[1], base + NvRegMacAddrB);
1447 * nv_set_mac_address: dev->set_mac_address function
1448 * Called with rtnl_lock() held.
1450 static int nv_set_mac_address(struct net_device *dev, void *addr)
1452 struct fe_priv *np = get_nvpriv(dev);
1453 struct sockaddr *macaddr = (struct sockaddr*)addr;
1455 if(!is_valid_ether_addr(macaddr->sa_data))
1456 return -EADDRNOTAVAIL;
1458 /* synchronized against open : rtnl_lock() held by caller */
1459 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
1461 if (netif_running(dev)) {
1462 spin_lock_bh(&dev->xmit_lock);
1463 spin_lock_irq(&np->lock);
1465 /* stop rx engine */
1468 /* set mac address */
1469 nv_copy_mac_to_hw(dev);
1471 /* restart rx engine */
1473 spin_unlock_irq(&np->lock);
1474 spin_unlock_bh(&dev->xmit_lock);
1476 nv_copy_mac_to_hw(dev);
1482 * nv_set_multicast: dev->set_multicast function
1483 * Called with dev->xmit_lock held.
1485 static void nv_set_multicast(struct net_device *dev)
1487 struct fe_priv *np = get_nvpriv(dev);
1488 u8 __iomem *base = get_hwbase(dev);
1493 memset(addr, 0, sizeof(addr));
1494 memset(mask, 0, sizeof(mask));
1496 if (dev->flags & IFF_PROMISC) {
1497 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1498 pff = NVREG_PFF_PROMISC;
1500 pff = NVREG_PFF_MYADDR;
1502 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
1506 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
1507 if (dev->flags & IFF_ALLMULTI) {
1508 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
1510 struct dev_mc_list *walk;
1512 walk = dev->mc_list;
1513 while (walk != NULL) {
1515 a = le32_to_cpu(*(u32 *) walk->dmi_addr);
1516 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
1524 addr[0] = alwaysOn[0];
1525 addr[1] = alwaysOn[1];
1526 mask[0] = alwaysOn[0] | alwaysOff[0];
1527 mask[1] = alwaysOn[1] | alwaysOff[1];
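/* The list walk above apparently accumulates in alwaysOn the bits that are
 * set in every multicast address and in alwaysOff the bits that are clear in
 * every address, so addr holds the common bit pattern and mask marks the bit
 * positions that are identical across the whole list. This reading of the
 * (guessed) filter scheme is an interpretation, not vendor documentation.
 */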
1530 addr[0] |= NVREG_MCASTADDRA_FORCE;
1531 pff |= NVREG_PFF_ALWAYS;
1532 spin_lock_irq(&np->lock);
1534 writel(addr[0], base + NvRegMulticastAddrA);
1535 writel(addr[1], base + NvRegMulticastAddrB);
1536 writel(mask[0], base + NvRegMulticastMaskA);
1537 writel(mask[1], base + NvRegMulticastMaskB);
1538 writel(pff, base + NvRegPacketFilterFlags);
1539 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
1542 spin_unlock_irq(&np->lock);
1545 static int nv_update_linkspeed(struct net_device *dev)
1547 struct fe_priv *np = get_nvpriv(dev);
1548 u8 __iomem *base = get_hwbase(dev);
1550 int newls = np->linkspeed;
1551 int newdup = np->duplex;
1554 u32 control_1000, status_1000, phyreg;
1556 /* BMSR_LSTATUS is latched, read it twice:
1557 * we want the current value.
1559 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1560 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1562 if (!(mii_status & BMSR_LSTATUS)) {
1563 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
1565 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1571 if (np->autoneg == 0) {
1572 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
1573 dev->name, np->fixed_mode);
1574 if (np->fixed_mode & LPA_100FULL) {
1575 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1577 } else if (np->fixed_mode & LPA_100HALF) {
1578 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1580 } else if (np->fixed_mode & LPA_10FULL) {
1581 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1584 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1590 /* check auto negotiation is complete */
1591 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
1592 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
1593 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1596 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
1601 if (np->gigabit == PHY_GIGABIT) {
1602 control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1603 status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
1605 if ((control_1000 & ADVERTISE_1000FULL) &&
1606 (status_1000 & LPA_1000FULL)) {
1607 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
1609 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
1615 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1616 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
1617 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
1618 dev->name, adv, lpa);
1620 /* FIXME: handle parallel detection properly */
1622 if (lpa & LPA_100FULL) {
1623 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1625 } else if (lpa & LPA_100HALF) {
1626 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1628 } else if (lpa & LPA_10FULL) {
1629 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1631 } else if (lpa & LPA_10HALF) {
1632 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1635 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
1636 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1641 if (np->duplex == newdup && np->linkspeed == newls)
1644 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
1645 dev->name, np->linkspeed, np->duplex, newls, newdup);
1647 np->duplex = newdup;
1648 np->linkspeed = newls;
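/* From here on the negotiated speed/duplex is programmed into the NIC
 * itself: the speed-dependent bits in NvRegRandomSeed (gigabit PHYs only),
 * the half/100/1000 selection in NvRegPhyInterface, NvRegMisc1 and finally
 * NvRegLinkSpeed.
 */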
1650 if (np->gigabit == PHY_GIGABIT) {
1651 phyreg = readl(base + NvRegRandomSeed);
1652 phyreg &= ~(0x3FF00);
1653 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
1654 phyreg |= NVREG_RNDSEED_FORCE3;
1655 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
1656 phyreg |= NVREG_RNDSEED_FORCE2;
1657 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
1658 phyreg |= NVREG_RNDSEED_FORCE;
1659 writel(phyreg, base + NvRegRandomSeed);
1662 phyreg = readl(base + NvRegPhyInterface);
1663 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
1664 if (np->duplex == 0)
1666 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
1668 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
1670 writel(phyreg, base + NvRegPhyInterface);
1672 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
1675 writel(np->linkspeed, base + NvRegLinkSpeed);
1681 static void nv_linkchange(struct net_device *dev)
1683 if (nv_update_linkspeed(dev)) {
1684 if (netif_carrier_ok(dev)) {
1687 netif_carrier_on(dev);
1688 printk(KERN_INFO "%s: link up.\n", dev->name);
1692 if (netif_carrier_ok(dev)) {
1693 netif_carrier_off(dev);
1694 printk(KERN_INFO "%s: link down.\n", dev->name);
1700 static void nv_link_irq(struct net_device *dev)
1702 u8 __iomem *base = get_hwbase(dev);
1705 miistat = readl(base + NvRegMIIStatus);
1706 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1707 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
1709 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
1711 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
1714 static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1716 struct net_device *dev = (struct net_device *) data;
1717 struct fe_priv *np = get_nvpriv(dev);
1718 u8 __iomem *base = get_hwbase(dev);
1722 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
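/* The handler loops: each pass reads and acknowledges NvRegIrqStatus, then
 * services tx completions, rx and link events. If more than
 * max_interrupt_work passes are needed the interrupt is considered stuck,
 * the irq mask is cleared and the nic_poll timer takes over.
 */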
1725 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
1726 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1728 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
1729 if (!(events & np->irqmask))
1732 if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
1733 spin_lock(&np->lock);
1735 spin_unlock(&np->lock);
1738 if (events & (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) {
1740 if (nv_alloc_rx(dev)) {
1741 spin_lock(&np->lock);
1742 if (!np->in_shutdown)
1743 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1744 spin_unlock(&np->lock);
1748 if (events & NVREG_IRQ_LINK) {
1749 spin_lock(&np->lock);
1751 spin_unlock(&np->lock);
1753 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
1754 spin_lock(&np->lock);
1756 spin_unlock(&np->lock);
1757 np->link_timeout = jiffies + LINK_TIMEOUT;
1759 if (events & (NVREG_IRQ_TX_ERR)) {
1760 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
1763 if (events & (NVREG_IRQ_UNKNOWN)) {
1764 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
1767 if (i > max_interrupt_work) {
1768 spin_lock(&np->lock);
1769 /* disable interrupts on the nic */
1770 writel(0, base + NvRegIrqMask);
1773 if (!np->in_shutdown)
1774 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
1775 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
1776 spin_unlock(&np->lock);
1781 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
1783 return IRQ_RETVAL(i);
1786 static void nv_do_nic_poll(unsigned long data)
1788 struct net_device *dev = (struct net_device *) data;
1789 struct fe_priv *np = get_nvpriv(dev);
1790 u8 __iomem *base = get_hwbase(dev);
1792 disable_irq(dev->irq);
1793 /* FIXME: Do we need synchronize_irq(dev->irq) here? */
1795 * reenable interrupts on the nic, we have to do this before calling
1796 * nv_nic_irq because that may decide to do otherwise
1798 writel(np->irqmask, base + NvRegIrqMask);
1800 nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
1801 enable_irq(dev->irq);
1804 #ifdef CONFIG_NET_POLL_CONTROLLER
1805 static void nv_poll_controller(struct net_device *dev)
1807 nv_do_nic_poll((unsigned long) dev);
1811 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1813 struct fe_priv *np = get_nvpriv(dev);
1814 strcpy(info->driver, "forcedeth");
1815 strcpy(info->version, FORCEDETH_VERSION);
1816 strcpy(info->bus_info, pci_name(np->pci_dev));
1819 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
1821 struct fe_priv *np = get_nvpriv(dev);
1822 wolinfo->supported = WAKE_MAGIC;
1824 spin_lock_irq(&np->lock);
1826 wolinfo->wolopts = WAKE_MAGIC;
1827 spin_unlock_irq(&np->lock);
1830 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
1832 struct fe_priv *np = get_nvpriv(dev);
1833 u8 __iomem *base = get_hwbase(dev);
1835 spin_lock_irq(&np->lock);
1836 if (wolinfo->wolopts == 0) {
1837 writel(0, base + NvRegWakeUpFlags);
1840 if (wolinfo->wolopts & WAKE_MAGIC) {
1841 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
1844 spin_unlock_irq(&np->lock);
1848 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1850 struct fe_priv *np = netdev_priv(dev);
1853 spin_lock_irq(&np->lock);
1854 ecmd->port = PORT_MII;
1855 if (!netif_running(dev)) {
1856 /* We do not track link speed / duplex setting if the
1857 * interface is disabled. Force a link check */
1858 nv_update_linkspeed(dev);
1860 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
1861 case NVREG_LINKSPEED_10:
1862 ecmd->speed = SPEED_10;
1864 case NVREG_LINKSPEED_100:
1865 ecmd->speed = SPEED_100;
1867 case NVREG_LINKSPEED_1000:
1868 ecmd->speed = SPEED_1000;
1871 ecmd->duplex = DUPLEX_HALF;
1873 ecmd->duplex = DUPLEX_FULL;
1875 ecmd->autoneg = np->autoneg;
1877 ecmd->advertising = ADVERTISED_MII;
1879 ecmd->advertising |= ADVERTISED_Autoneg;
1880 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1882 adv = np->fixed_mode;
1884 if (adv & ADVERTISE_10HALF)
1885 ecmd->advertising |= ADVERTISED_10baseT_Half;
1886 if (adv & ADVERTISE_10FULL)
1887 ecmd->advertising |= ADVERTISED_10baseT_Full;
1888 if (adv & ADVERTISE_100HALF)
1889 ecmd->advertising |= ADVERTISED_100baseT_Half;
1890 if (adv & ADVERTISE_100FULL)
1891 ecmd->advertising |= ADVERTISED_100baseT_Full;
1892 if (np->autoneg && np->gigabit == PHY_GIGABIT) {
1893 adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1894 if (adv & ADVERTISE_1000FULL)
1895 ecmd->advertising |= ADVERTISED_1000baseT_Full;
1898 ecmd->supported = (SUPPORTED_Autoneg |
1899 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
1900 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1902 if (np->gigabit == PHY_GIGABIT)
1903 ecmd->supported |= SUPPORTED_1000baseT_Full;
1905 ecmd->phy_address = np->phyaddr;
1906 ecmd->transceiver = XCVR_EXTERNAL;
1908 /* ignore maxtxpkt, maxrxpkt for now */
1909 spin_unlock_irq(&np->lock);
1913 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1915 struct fe_priv *np = netdev_priv(dev);
1917 if (ecmd->port != PORT_MII)
1919 if (ecmd->transceiver != XCVR_EXTERNAL)
1921 if (ecmd->phy_address != np->phyaddr) {
1922 /* TODO: support switching between multiple phys. Should be
1923 * trivial, but not enabled due to lack of test hardware. */
1926 if (ecmd->autoneg == AUTONEG_ENABLE) {
1929 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1930 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
1931 if (np->gigabit == PHY_GIGABIT)
1932 mask |= ADVERTISED_1000baseT_Full;
1934 if ((ecmd->advertising & mask) == 0)
1937 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
1938 /* Note: autonegotiation disable, speed 1000 intentionally
1939 * forbidden - no one should need that. */
1941 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
1943 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1949 spin_lock_irq(&np->lock);
1950 if (ecmd->autoneg == AUTONEG_ENABLE) {
1955 /* advertise only what has been requested */
1956 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1957 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
1958 if (ecmd->advertising & ADVERTISED_10baseT_Half)
1959 adv |= ADVERTISE_10HALF;
1960 if (ecmd->advertising & ADVERTISED_10baseT_Full)
1961 adv |= ADVERTISE_10FULL;
1962 if (ecmd->advertising & ADVERTISED_100baseT_Half)
1963 adv |= ADVERTISE_100HALF;
1964 if (ecmd->advertising & ADVERTISED_100baseT_Full)
1965 adv |= ADVERTISE_100FULL;
1966 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
1968 if (np->gigabit == PHY_GIGABIT) {
1969 adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1970 adv &= ~ADVERTISE_1000FULL;
1971 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
1972 adv |= ADVERTISE_1000FULL;
1973 mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
1976 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1977 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1978 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
1985 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1986 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
1987 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
1988 adv |= ADVERTISE_10HALF;
1989 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
1990 adv |= ADVERTISE_10FULL;
1991 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
1992 adv |= ADVERTISE_100HALF;
1993 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
1994 adv |= ADVERTISE_100FULL;
1995 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
1996 np->fixed_mode = adv;
1998 if (np->gigabit == PHY_GIGABIT) {
1999 adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
2000 adv &= ~ADVERTISE_1000FULL;
2001 mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
2004 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
2005 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
2006 if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
2007 bmcr |= BMCR_FULLDPLX;
2008 if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
2009 bmcr |= BMCR_SPEED100;
2010 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
2012 if (netif_running(dev)) {
2013 /* Wait a bit and then reconfigure the nic. */
2018 spin_unlock_irq(&np->lock);
2023 #define FORCEDETH_REGS_VER 1
2024 #define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */
2026 static int nv_get_regs_len(struct net_device *dev)
2028 return FORCEDETH_REGS_SIZE;
2031 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2033 struct fe_priv *np = get_nvpriv(dev);
2034 u8 __iomem *base = get_hwbase(dev);
2038 regs->version = FORCEDETH_REGS_VER;
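/* Snapshot the whole register window (FORCEDETH_REGS_SIZE bytes,
 * i.e. 256 32-bit registers) under the lock so the ethtool dump
 * is internally consistent. */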
2039 spin_lock_irq(&np->lock);
2040 for (i = 0; i < FORCEDETH_REGS_SIZE/sizeof(u32); i++)
2041 rbuf[i] = readl(base + i*sizeof(u32));
2042 spin_unlock_irq(&np->lock);
2045 static int nv_nway_reset(struct net_device *dev)
2047 struct fe_priv *np = get_nvpriv(dev);
2050 spin_lock_irq(&np->lock);
2054 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
2055 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
2056 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
2062 spin_unlock_irq(&np->lock);
2067 static struct ethtool_ops ops = {
2068 .get_drvinfo = nv_get_drvinfo,
2069 .get_link = ethtool_op_get_link,
2070 .get_wol = nv_get_wol,
2071 .set_wol = nv_set_wol,
2072 .get_settings = nv_get_settings,
2073 .set_settings = nv_set_settings,
2074 .get_regs_len = nv_get_regs_len,
2075 .get_regs = nv_get_regs,
2076 .nway_reset = nv_nway_reset,
2077 .get_perm_addr = ethtool_op_get_perm_addr,
2080 static int nv_open(struct net_device *dev)
2082 struct fe_priv *np = get_nvpriv(dev);
2083 u8 __iomem *base = get_hwbase(dev);
2086 dprintk(KERN_DEBUG "nv_open: begin\n");
2088 /* 1) erase previous misconfiguration */
2089 /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
2090 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
2091 writel(0, base + NvRegMulticastAddrB);
2092 writel(0, base + NvRegMulticastMaskA);
2093 writel(0, base + NvRegMulticastMaskB);
2094 writel(0, base + NvRegPacketFilterFlags);
2096 writel(0, base + NvRegTransmitterControl);
2097 writel(0, base + NvRegReceiverControl);
2099 writel(0, base + NvRegAdapterControl);
2101 /* 2) initialize descriptor rings */
2103 oom = nv_init_ring(dev);
2105 writel(0, base + NvRegLinkSpeed);
2106 writel(0, base + NvRegUnknownTransmitterReg);
2108 writel(0, base + NvRegUnknownSetupReg6);
2110 np->in_shutdown = 0;
2112 /* 3) set mac address */
2113 nv_copy_mac_to_hw(dev);
2115 /* 4) give hw rings */
2116 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
2117 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2118 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
2120 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
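/* The tx ring sits directly behind the rx ring in the single DMA
 * allocation made in nv_probe, hence the RX_RING * descriptor-size
 * offset above. Ring sizes are programmed as (entries - 1) in the
 * rx/tx fields of NvRegRingSizes. */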
2121 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
2122 base + NvRegRingSizes);
2124 /* 5) continue setup */
2125 writel(np->linkspeed, base + NvRegLinkSpeed);
2126 writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
2127 writel(np->txrxctl_bits, base + NvRegTxRxControl);
2129 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
2130 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
2131 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
2132 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
2134 writel(0, base + NvRegUnknownSetupReg4);
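/* Acknowledge any stale interrupt and MII events left over from a
 * previous run by writing the status mask values back to
 * NvRegIrqStatus and NvRegMIIStatus. */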
2135 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2136 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
2138 /* 6) continue setup */
2139 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
2140 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
2141 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
2142 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2144 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
2145 get_random_bytes(&i, sizeof(i));
2146 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
2147 writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
2148 writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
2149 writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval);
2150 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
2151 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
2152 base + NvRegAdapterControl);
2153 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
2154 writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
2155 writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
2157 i = readl(base + NvRegPowerState);
2158 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
2159 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
2163 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
2165 writel(0, base + NvRegIrqMask);
2167 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
2168 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2171 ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
2175 /* ask for interrupts */
2176 writel(np->irqmask, base + NvRegIrqMask);
2178 spin_lock_irq(&np->lock);
2179 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
2180 writel(0, base + NvRegMulticastAddrB);
2181 writel(0, base + NvRegMulticastMaskA);
2182 writel(0, base + NvRegMulticastMaskB);
2183 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
2184 /* One manual link speed update: Interrupts are enabled, future link
2185  * speed changes cause interrupts and are handled by nv_link_irq(). */
2189 miistat = readl(base + NvRegMIIStatus);
2190 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
2191 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
2193 /* set linkspeed to an invalid value, thus force nv_update_linkspeed
2194  * to reinitialize the current link speed. */
2195 np->linkspeed = 0;
2196 ret = nv_update_linkspeed(dev);
2199 netif_start_queue(dev);
2201 netif_carrier_on(dev);
2203 printk("%s: no link during initialization.\n", dev->name);
2204 netif_carrier_off(dev);
2207 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2208 spin_unlock_irq(&np->lock);
2216 static int nv_close(struct net_device *dev)
2218 struct fe_priv *np = get_nvpriv(dev);
2221 spin_lock_irq(&np->lock);
2222 np->in_shutdown = 1;
2223 spin_unlock_irq(&np->lock);
2224 synchronize_irq(dev->irq);
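/* in_shutdown is set under the lock and any in-flight interrupt
 * handler is allowed to finish before the refill and poll timers
 * are deleted, so they cannot be re-armed afterwards. */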
2226 del_timer_sync(&np->oom_kick);
2227 del_timer_sync(&np->nic_poll);
2229 netif_stop_queue(dev);
2230 spin_lock_irq(&np->lock);
2235 /* disable interrupts on the nic or we will lock up */
2236 base = get_hwbase(dev);
2237 writel(0, base + NvRegIrqMask);
2239 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
2241 spin_unlock_irq(&np->lock);
2243 free_irq(dev->irq, dev);
2250 /* special op: write back the misordered MAC address - otherwise
2251  * the next nv_probe would see a wrong address. */
2253 writel(np->orig_mac[0], base + NvRegMacAddrA);
2254 writel(np->orig_mac[1], base + NvRegMacAddrB);
2256 /* FIXME: power down nic */
2261 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
2263 struct net_device *dev;
2269 dev = alloc_etherdev(sizeof(struct fe_priv));
2274 np = get_nvpriv(dev);
2275 np->pci_dev = pci_dev;
2276 spin_lock_init(&np->lock);
2277 SET_MODULE_OWNER(dev);
2278 SET_NETDEV_DEV(dev, &pci_dev->dev);
2280 init_timer(&np->oom_kick);
2281 np->oom_kick.data = (unsigned long) dev;
2282 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
2283 init_timer(&np->nic_poll);
2284 np->nic_poll.data = (unsigned long) dev;
2285 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
2287 err = pci_enable_device(pci_dev);
2289 printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
2290 err, pci_name(pci_dev));
2294 pci_set_master(pci_dev);
2296 err = pci_request_regions(pci_dev, DRV_NAME);
2302 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2303 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
2304 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
2305 pci_resource_len(pci_dev, i),
2306 pci_resource_flags(pci_dev, i));
2307 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
2308 pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
2309 addr = pci_resource_start(pci_dev, i);
2313 if (i == DEVICE_COUNT_RESOURCE) {
2314 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
2319 /* handle different descriptor versions */
2320 if (id->driver_data & DEV_HAS_HIGH_DMA) {
2321 /* packet format 3: supports 40-bit addressing */
2322 np->desc_ver = DESC_VER_3;
2323 if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2324 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2327 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
2328 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
2329 /* packet format 2: supports jumbo frames */
2330 np->desc_ver = DESC_VER_2;
2331 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
2333 /* original packet format */
2334 np->desc_ver = DESC_VER_1;
2335 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
2338 np->pkt_limit = NV_PKTLIMIT_1;
2339 if (id->driver_data & DEV_HAS_LARGEDESC)
2340 np->pkt_limit = NV_PKTLIMIT_2;
2342 if (id->driver_data & DEV_HAS_CHECKSUM) {
2343 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
2344 dev->features |= NETIF_F_HW_CSUM;
2348 np->base = ioremap(addr, NV_PCI_REGSZ);
2351 dev->base_addr = (unsigned long)np->base;
2353 dev->irq = pci_dev->irq;
2355 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2356 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
2357 sizeof(struct ring_desc) * (RX_RING + TX_RING),
2359 if (!np->rx_ring.orig)
2361 np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
2363 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
2364 sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
2366 if (!np->rx_ring.ex)
2368 np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
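/* Both descriptor formats use one consistent DMA allocation: the rx
 * ring starts at ring_addr and the tx ring follows after RX_RING
 * entries, matching the offsets written to NvRegTxRingPhysAddr in
 * nv_open(). */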
2371 dev->open = nv_open;
2372 dev->stop = nv_close;
2373 dev->hard_start_xmit = nv_start_xmit;
2374 dev->get_stats = nv_get_stats;
2375 dev->change_mtu = nv_change_mtu;
2376 dev->set_mac_address = nv_set_mac_address;
2377 dev->set_multicast_list = nv_set_multicast;
2378 #ifdef CONFIG_NET_POLL_CONTROLLER
2379 dev->poll_controller = nv_poll_controller;
2381 SET_ETHTOOL_OPS(dev, &ops);
2382 dev->tx_timeout = nv_tx_timeout;
2383 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
2385 pci_set_drvdata(pci_dev, dev);
2387 /* read the mac address */
2388 base = get_hwbase(dev);
2389 np->orig_mac[0] = readl(base + NvRegMacAddrA);
2390 np->orig_mac[1] = readl(base + NvRegMacAddrB);
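/* The hardware holds the MAC address spread over MacAddrA/MacAddrB
 * in reversed byte order; keep the raw register values in orig_mac
 * so nv_close() can write them back for the next probe. */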
2392 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
2393 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
2394 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
2395 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
2396 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
2397 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
2398 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
2400 if (!is_valid_ether_addr(dev->perm_addr)) {
2402 /* Bad MAC address. At least one BIOS sets the MAC address
2403  * to 01:23:45:67:89:ab. */
2405 printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
2407 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2408 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2409 printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
2410 dev->dev_addr[0] = 0x00;
2411 dev->dev_addr[1] = 0x00;
2412 dev->dev_addr[2] = 0x6c;
2413 get_random_bytes(&dev->dev_addr[3], 3);
2416 dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
2417 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2418 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2421 writel(0, base + NvRegWakeUpFlags);
2424 if (np->desc_ver == DESC_VER_1) {
2425 np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
2427 np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
2429 np->irqmask = NVREG_IRQMASK_WANTED;
2430 if (id->driver_data & DEV_NEED_TIMERIRQ)
2431 np->irqmask |= NVREG_IRQ_TIMER;
2432 if (id->driver_data & DEV_NEED_LINKTIMER) {
2433 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
2434 np->need_linktimer = 1;
2435 np->link_timeout = jiffies + LINK_TIMEOUT;
2437 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
2438 np->need_linktimer = 0;
2441 /* find a suitable phy */
2442 for (i = 1; i < 32; i++) {
2445 spin_lock_irq(&np->lock);
2446 id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
2447 spin_unlock_irq(&np->lock);
2448 if (id1 < 0 || id1 == 0xffff)
2450 spin_lock_irq(&np->lock);
2451 id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
2452 spin_unlock_irq(&np->lock);
2453 if (id2 < 0 || id2 == 0xffff)
2456 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
2457 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
2458 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
2459 pci_name(pci_dev), id1, id2, i);
2461 np->phy_oui = id1 | id2;
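/* PHYSID1 holds the upper OUI bits and PHYSID2 the lower ones;
 * mask and shift them into a single OUI value that is used for
 * PHY-specific setup elsewhere in the driver. */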
2465 /* PHY in isolate mode? No phy attached and user wants to
2466  * test loopback? Very odd, but can be correct. */
2468 printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
2477 /* set default link speed settings */
2478 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2482 err = register_netdev(dev);
2484 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
2487 printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
2488 dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
2494 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2495 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
2496 np->rx_ring.orig, np->ring_addr);
2498 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
2499 np->rx_ring.ex, np->ring_addr);
2500 pci_set_drvdata(pci_dev, NULL);
2502 iounmap(get_hwbase(dev));
2504 pci_release_regions(pci_dev);
2506 pci_disable_device(pci_dev);
2513 static void __devexit nv_remove(struct pci_dev *pci_dev)
2515 struct net_device *dev = pci_get_drvdata(pci_dev);
2516 struct fe_priv *np = get_nvpriv(dev);
2518 unregister_netdev(dev);
2520 /* free all structures */
2521 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2522 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
2524 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
2525 iounmap(get_hwbase(dev));
2526 pci_release_regions(pci_dev);
2527 pci_disable_device(pci_dev);
2529 pci_set_drvdata(pci_dev, NULL);
2532 static struct pci_device_id pci_tbl[] = {
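/* The driver_data flags are evaluated in nv_probe(): DEV_HAS_HIGH_DMA
 * selects descriptor format 3 and the large DMA mask, DEV_HAS_LARGEDESC
 * selects format 2 plus the jumbo-frame packet limit, and
 * DEV_HAS_CHECKSUM enables receive checksum offload. */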
2533 { /* nForce Ethernet Controller */
2534 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
2535 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2537 { /* nForce2 Ethernet Controller */
2538 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
2539 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2541 { /* nForce3 Ethernet Controller */
2542 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
2543 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2545 { /* nForce3 Ethernet Controller */
2546 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
2547 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
2549 { /* nForce3 Ethernet Controller */
2550 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
2551 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
2553 { /* nForce3 Ethernet Controller */
2554 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
2555 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
2557 { /* nForce3 Ethernet Controller */
2558 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
2559 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
2561 { /* CK804 Ethernet Controller */
2562 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
2563 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2565 { /* CK804 Ethernet Controller */
2566 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
2567 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2569 { /* MCP04 Ethernet Controller */
2570 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
2571 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2573 { /* MCP04 Ethernet Controller */
2574 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
2575 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2577 { /* MCP51 Ethernet Controller */
2578 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
2579 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
2581 { /* MCP51 Ethernet Controller */
2582 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
2583 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
2585 { /* MCP55 Ethernet Controller */
2586 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
2587 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2589 { /* MCP55 Ethernet Controller */
2590 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
2591 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2596 static struct pci_driver driver = {
2597 .name = "forcedeth",
2598 .id_table = pci_tbl,
2599 .probe = nv_probe,
2600 .remove = __devexit_p(nv_remove),
2604 static int __init init_nic(void)
2606 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
2607 return pci_module_init(&driver);
2610 static void __exit exit_nic(void)
2612 pci_unregister_driver(&driver);
2615 module_param(max_interrupt_work, int, 0);
2616 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
2618 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
2619 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
2620 MODULE_LICENSE("GPL");
2622 MODULE_DEVICE_TABLE(pci, pci_tbl);
2624 module_init(init_nic);
2625 module_exit(exit_nic);