 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,5,6 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 * 0.01: 05 Oct 2003: First release that compiles without warnings.
 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *	Check all PCI BARs for the register window.
 *	udelay added to mii_rw.
 * 0.03: 06 Oct 2003: Initialize dev->irq.
 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 * 0.07: 14 Oct 2003: Further irq mask updates.
 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *	added into irq handler, NULL check for drain_ring.
 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *	requested interrupt sources.
 * 0.10: 20 Oct 2003: First cleanup for release.
 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *	MAC Address init fix, set_multicast cleanup.
 * 0.12: 23 Oct 2003: Cleanups for release.
 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *	Set link speed correctly. start rx before starting
 *	tx (nv_start_rx sets the link speed).
 * 0.14: 25 Oct 2003: Nic dependent irq mask.
 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *	increased to 1628 bytes.
 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *	addresses, really stop rx if already running
 *	in nv_start_rx, clean up a bit.
 * 0.20: 07 Dec 2003: alloc fixes
 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 * 0.23: 26 Jan 2004: various small cleanups
 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 0.25: 09 Mar 2004: wol support
 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *	added CK804/MCP04 device IDs, code fixes
 *	for registers, link status and other minor fixes.
 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *	into nv_close, otherwise reenabling for wol can
 *	cause DMA to kfree'd memory.
 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 0.33: 16 May 2005: Support for MCP51 added.
 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 * 0.35: 26 Jun 2005: Support for MCP55 added.
 * 0.36: 28 Jun 2005: Add jumbo frame support.
 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
 * 0.40: 19 Jul 2005: Add support for mac address change.
 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
 *	in the second (and later) nv_open call
 * 0.43: 10 Aug 2005: Add support for tx checksum.
 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 * 0.46: 20 Oct 2005: Add irq optimization modes.
 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 * 0.49: 10 Dec 2005: Fix tso for large buffers.
 * 0.50: 20 Jan 2006: Add 802.1p/q tagging support.
 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 * 0.55: 22 Mar 2006: Add flow control (pause frame).
 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 * 0.58: 30 Oct 2006: Added support for sideband management unit.
 * 0.59: 30 Oct 2006: Added support for recoverable error.
 * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI ""
#endif

#define FORCEDETH_VERSION	"0.60"
#define DRV_NAME		"forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64
#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
#define DEV_HAS_STATISTICS_V1	0x0400	/* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x0800	/* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED	0x1000	/* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x2000	/* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x4000	/* device supports correct mac address order */
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQ_RECOVER_ERROR	0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU	0x0060
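/* NVREG_IRQMASK_THROUGHPUT (0x00df) is RX_ERROR|RX|RX_NOBUF|TX_ERR|TX_OK|
 * LINK|RX_FORCED, i.e. an interrupt per packet event, while
 * NVREG_IRQMASK_CPU (0x0060) is only TIMER|LINK, leaving the packet work to
 * the timer irq - matching the throughput vs. cpu optimization modes
 * described further below. */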
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
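/* Worked example of the relation above (interval_us = value * 2^10 / 100):
 * 970 -> ~9.9 ms for the throughput-mode backup cleanup, 13 -> ~133 us for
 * cpu mode, and 97 -> the ~1 ms quoted in the comment. */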
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED	0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT	0
#define NVREG_RINGSZ_RXSHIFT	16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY		5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};
#define FLAG_MASK_V1	0xffff0000
#define FLAG_MASK_V2	0xffffc000
#define LEN_MASK_V1	(0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2	(0xffffffff ^ FLAG_MASK_V2)
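/* Evaluated, the length masks are simply the complements of the flag masks:
 * LEN_MASK_V1 == 0x0000ffff (16 length bits), LEN_MASK_V2 == 0x00003fff
 * (14 length bits). */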
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1		ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2		9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHY_OUI_VITESSE	0x01c1
#define PHY_OUI_REALTEK	0x0732
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
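/* A sketch of how these masks are meant to combine (the probe code that
 * uses them lies outside this excerpt): the PHY's OUI is reassembled from
 * the two MII ID registers roughly as
 *   oui = ((id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT) |
 *         ((id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT);
 * and then compared against the PHY_OUI_* values above. */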
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_MARVELL_E3016		0x220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE	0x0001
#define NV_PAUSEFRAME_TX_CAPABLE	0x0002
#define NV_PAUSEFRAME_RX_ENABLE		0x0004
#define NV_PAUSEFRAME_TX_ENABLE		0x0008
#define NV_PAUSEFRAME_RX_REQ		0x0010
#define NV_PAUSEFRAME_TX_REQ		0x0020
#define NV_PAUSEFRAME_AUTONEG		0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" }
};
struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;
};

#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
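/* The sizeof()/sizeof(u64) trick above counts every u64 field of
 * nv_ethtool_stats; the "- 6" drops the six version-2-only counters
 * (tx_deferral, tx_packets, rx_bytes, tx_pause, rx_pause, rx_drop_frame -
 * the ones gathered under DEV_HAS_STATISTICS_V2 in nv_get_hw_stats below). */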
#define NV_TEST_COUNT_BASE	3
#define NV_TEST_COUNT_EXTENDED	4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};
struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
};
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts of the operation are without lock:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	u16 gigabit;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 driver_data;
	u32 mac_in_use;
	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;

	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
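/*
 * A minimal sketch, not part of the driver itself: converting a desired
 * timer interval in microseconds into a poll_interval value using the
 * [(time_in_micro_secs * 100) / (2^10)] relation quoted above.
 */
static inline u32 nv_us_to_poll_interval(u32 us)
{
	/* e.g. 1000 us * 100 / 1024 = ~97, i.e. roughly 1 ms */
	return (us * 100) >> 10;
}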
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;
/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}
static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING	0x01
#define NV_SETUP_TX_RING	0x02
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
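/* (hence nv_disable_hw_interrupts below can write the very same mask again
 * in MSI-X mode: the XOR toggles the enabled bits back off) */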
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	/* reset the phy
	 * (certain phys need bmcr to be set up with reset)
	 */
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}
	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		/* reset could have cleared these out, set them back */
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		np->stats.tx_bytes = np->estats.tx_bytes;
		np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		np->stats.rx_crc_errors = np->estats.rx_crc_errors;
		np->stats.rx_over_errors = np->estats.rx_over_errors;
		np->stats.rx_errors = np->estats.rx_errors_total;
		np->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &np->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
			np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* If rx bufs are exhausted, called after 50ms to attempt to refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev, &np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len, PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	}
	return 0;
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			np->stats.tx_dropped++;
	}
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}
static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
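/* Worked example: with tx_ring_size 256 and put_tx_ctx 10 entries ahead of
 * get_tx_ctx, (256 - (256 + 10) % 256) leaves 246 empty slots; the added
 * tx_ring_size keeps the modulo argument positive once put wraps around
 * behind get. */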
1779 * nv_start_xmit: dev->hard_start_xmit function
1780 * Called with netif_tx_lock held.
1782 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1784 struct fe_priv *np = netdev_priv(dev);
1786 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1787 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1791 u32 size = skb->len-skb->data_len;
1792 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1794 struct ring_desc* put_tx;
1795 struct ring_desc* start_tx;
1796 struct ring_desc* prev_tx;
1797 struct nv_skb_map* prev_tx_ctx;
1799 /* add fragments to entries count */
1800 for (i = 0; i < fragments; i++) {
1801 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1802 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1805 empty_slots = nv_get_empty_tx_slots(np);
1806 if (unlikely(empty_slots <= entries)) {
1807 spin_lock_irq(&np->lock);
1808 netif_stop_queue(dev);
1810 spin_unlock_irq(&np->lock);
1811 return NETDEV_TX_BUSY;
1814 start_tx = put_tx = np->put_tx.orig;
1816 /* setup the header buffer */
1819 prev_tx_ctx = np->put_tx_ctx;
1820 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1821 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1823 np->put_tx_ctx->dma_len = bcnt;
1824 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1825 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1827 tx_flags = np->tx_flags;
1830 if (unlikely(put_tx++ == np->last_tx.orig))
1831 put_tx = np->first_tx.orig;
1832 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1833 np->put_tx_ctx = np->first_tx_ctx;
1836 /* setup the fragments */
1837 for (i = 0; i < fragments; i++) {
1838 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1839 u32 size = frag->size;
1844 prev_tx_ctx = np->put_tx_ctx;
1845 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1846 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1848 np->put_tx_ctx->dma_len = bcnt;
1849 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1850 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1854 if (unlikely(put_tx++ == np->last_tx.orig))
1855 put_tx = np->first_tx.orig;
1856 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1857 np->put_tx_ctx = np->first_tx_ctx;
1861 /* set last fragment flag */
1862 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
1864 /* save skb in this slot's context area */
1865 prev_tx_ctx->skb = skb;
1867 if (skb_is_gso(skb))
1868 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1870 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1871 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1873 spin_lock_irq(&np->lock);
1876 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1877 np->put_tx.orig = put_tx;
1879 spin_unlock_irq(&np->lock);
1881 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1882 dev->name, entries, tx_flags_extra);
1885 for (j=0; j<64; j++) {
1887 dprintk("\n%03x:", j);
1888 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1893 dev->trans_start = jiffies;
1894 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1895 return NETDEV_TX_OK;
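/*
 * Sketch (standalone, for illustration): nv_start_xmit sizes each buffer in
 * ring entries, where one descriptor carries at most NV_TX2_TSO_MAX_SIZE
 * bytes. Because that limit is a power of two, the driver's shift-and-test
 * form is a ceiling division. tx_entries_for is an illustrative name:
 */
static inline u32 tx_entries_for(u32 size, u32 max_per_desc)
{
	/* equivalent to (size >> SHIFT) + ((size & (max-1)) ? 1 : 0)
	 * when max_per_desc is a power of two; size == 0 needs 0 entries */
	return size / max_per_desc + ((size % max_per_desc) ? 1 : 0);
}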
1898 static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
1900 struct fe_priv *np = netdev_priv(dev);
1903 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1907 u32 size = skb->len-skb->data_len;
1908 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1910 struct ring_desc_ex* put_tx;
1911 struct ring_desc_ex* start_tx;
1912 struct ring_desc_ex* prev_tx;
1913 struct nv_skb_map* prev_tx_ctx;
1915 /* add fragments to entries count */
1916 for (i = 0; i < fragments; i++) {
1917 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1918 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1921 empty_slots = nv_get_empty_tx_slots(np);
1922 if (unlikely(empty_slots <= entries)) {
1923 spin_lock_irq(&np->lock);
1924 netif_stop_queue(dev);
1926 spin_unlock_irq(&np->lock);
1927 return NETDEV_TX_BUSY;
1930 start_tx = put_tx = np->put_tx.ex;
1932 /* setup the header buffer */
1935 prev_tx_ctx = np->put_tx_ctx;
1936 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1937 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1939 np->put_tx_ctx->dma_len = bcnt;
1940 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1941 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1942 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1944 tx_flags = NV_TX2_VALID;
1947 if (unlikely(put_tx++ == np->last_tx.ex))
1948 put_tx = np->first_tx.ex;
1949 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1950 np->put_tx_ctx = np->first_tx_ctx;
1953 /* setup the fragments */
1954 for (i = 0; i < fragments; i++) {
1955 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1956 u32 size = frag->size;
1961 prev_tx_ctx = np->put_tx_ctx;
1962 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1963 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1965 np->put_tx_ctx->dma_len = bcnt;
1966 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1967 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1968 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1972 if (unlikely(put_tx++ == np->last_tx.ex))
1973 put_tx = np->first_tx.ex;
1974 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1975 np->put_tx_ctx = np->first_tx_ctx;
1979 /* set last fragment flag */
1980 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
1982 /* save skb in this slot's context area */
1983 prev_tx_ctx->skb = skb;
1985 if (skb_is_gso(skb))
1986 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1987 else
1988 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1989 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1992 if (likely(!np->vlangrp)) {
1993 start_tx->txvlan = 0;
1994 } else {
1995 if (vlan_tx_tag_present(skb))
1996 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
1997 else
1998 start_tx->txvlan = 0;
1999 }
2001 spin_lock_irq(&np->lock);
2004 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2005 np->put_tx.ex = put_tx;
2007 spin_unlock_irq(&np->lock);
2009 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2010 dev->name, entries, tx_flags_extra);
2013 for (j=0; j<64; j++) {
2015 dprintk("\n%03x:", j);
2016 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2021 dev->trans_start = jiffies;
2022 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2023 return NETDEV_TX_OK;
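/*
 * Sketch: the version-3 descriptors split a 64-bit DMA address into two
 * 32-bit words. Note that the cpu_to_le64()-then-shift form used above is
 * only correct on little-endian hosts; a portable variant converts each
 * half separately. Standalone model, with split_dma64 as an illustrative
 * name and u64 standing in for dma_addr_t:
 */
static inline void split_dma64(u64 dma, u32 *high, u32 *low)
{
	*high = (u32)(dma >> 32);		/* upper word for bufhigh */
	*low = (u32)(dma & 0xffffffffULL);	/* lower word for buflow */
}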
2027 * nv_tx_done: check for completed packets, release the skbs.
2029 * Caller must own np->lock.
2031 static void nv_tx_done(struct net_device *dev)
2033 struct fe_priv *np = netdev_priv(dev);
2035 struct ring_desc* orig_get_tx = np->get_tx.orig;
2037 while ((np->get_tx.orig != np->put_tx.orig) &&
2038 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
2040 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2043 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2044 np->get_tx_ctx->dma_len,
2046 np->get_tx_ctx->dma = 0;
2048 if (np->desc_ver == DESC_VER_1) {
2049 if (flags & NV_TX_LASTPACKET) {
2050 if (flags & NV_TX_ERROR) {
2051 if (flags & NV_TX_UNDERFLOW)
2052 np->stats.tx_fifo_errors++;
2053 if (flags & NV_TX_CARRIERLOST)
2054 np->stats.tx_carrier_errors++;
2055 np->stats.tx_errors++;
2057 np->stats.tx_packets++;
2058 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
2060 dev_kfree_skb_any(np->get_tx_ctx->skb);
2061 np->get_tx_ctx->skb = NULL;
2064 if (flags & NV_TX2_LASTPACKET) {
2065 if (flags & NV_TX2_ERROR) {
2066 if (flags & NV_TX2_UNDERFLOW)
2067 np->stats.tx_fifo_errors++;
2068 if (flags & NV_TX2_CARRIERLOST)
2069 np->stats.tx_carrier_errors++;
2070 np->stats.tx_errors++;
2072 np->stats.tx_packets++;
2073 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
2075 dev_kfree_skb_any(np->get_tx_ctx->skb);
2076 np->get_tx_ctx->skb = NULL;
2079 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2080 np->get_tx.orig = np->first_tx.orig;
2081 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2082 np->get_tx_ctx = np->first_tx_ctx;
2084 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2086 netif_wake_queue(dev);
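/*
 * Sketch of the wraparound idiom used in the completion loop above:
 * post-increment advances the pointer, and if the value before the
 * increment was the last ring element, the pointer wraps back to the
 * first. A standalone model over an int array (demo_ring is an
 * illustrative type, not a driver structure):
 */
struct demo_ring {
	int *first, *last, *cur;	/* last points at the final element */
};

static inline void demo_ring_advance(struct demo_ring *r)
{
	if (r->cur++ == r->last)	/* test the old value, then step */
		r->cur = r->first;
}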
2090 static void nv_tx_done_optimized(struct net_device *dev, int limit)
2092 struct fe_priv *np = netdev_priv(dev);
2094 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2096 while ((np->get_tx.ex != np->put_tx.ex) &&
2097 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
2098 (limit-- > 0)) {
2100 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2103 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2104 np->get_tx_ctx->dma_len,
2106 np->get_tx_ctx->dma = 0;
2108 if (flags & NV_TX2_LASTPACKET) {
2109 if (!(flags & NV_TX2_ERROR))
2110 np->stats.tx_packets++;
2111 dev_kfree_skb_any(np->get_tx_ctx->skb);
2112 np->get_tx_ctx->skb = NULL;
2114 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2115 np->get_tx.ex = np->first_tx.ex;
2116 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2117 np->get_tx_ctx = np->first_tx_ctx;
2119 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2121 netif_wake_queue(dev);
2126 * nv_tx_timeout: dev->tx_timeout function
2127 * Called with netif_tx_lock held.
2129 static void nv_tx_timeout(struct net_device *dev)
2131 struct fe_priv *np = netdev_priv(dev);
2132 u8 __iomem *base = get_hwbase(dev);
2135 if (np->msi_flags & NV_MSI_X_ENABLED)
2136 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2138 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2140 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2145 printk(KERN_INFO "%s: Ring at %lx\n",
2146 dev->name, (unsigned long)np->ring_addr);
2147 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2148 for (i=0;i<=np->register_size;i+= 32) {
2149 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2151 readl(base + i + 0), readl(base + i + 4),
2152 readl(base + i + 8), readl(base + i + 12),
2153 readl(base + i + 16), readl(base + i + 20),
2154 readl(base + i + 24), readl(base + i + 28));
2156 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2157 for (i=0;i<np->tx_ring_size;i+= 4) {
2158 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2159 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2161 le32_to_cpu(np->tx_ring.orig[i].buf),
2162 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2163 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2164 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2165 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2166 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2167 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2168 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2170 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2172 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2173 le32_to_cpu(np->tx_ring.ex[i].buflow),
2174 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2175 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2176 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2177 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2178 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2179 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2180 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2181 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2182 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2183 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2188 spin_lock_irq(&np->lock);
2190 /* 1) stop tx engine */
2193 /* 2) check that the packets were not sent already: */
2194 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2197 nv_tx_done_optimized(dev, np->tx_ring_size);
2199 /* 3) if there are dead entries: clear everything */
2200 if (np->get_tx_ctx != np->put_tx_ctx) {
2201 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2204 setup_hw_rings(dev, NV_SETUP_TX_RING);
2207 netif_wake_queue(dev);
2209 /* 4) restart tx engine */
2211 spin_unlock_irq(&np->lock);
2215 * Called when the nic notices a mismatch between the actual data len on the
2216 * wire and the len indicated in the 802 header
2218 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2220 int hdrlen; /* length of the 802 header */
2221 int protolen; /* length as stored in the proto field */
2223 /* 1) calculate len according to header */
2224 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2225 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
2226 hdrlen = VLAN_HLEN;
2227 } else {
2228 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
2229 hdrlen = ETH_HLEN;
2230 }
2231 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2232 dev->name, datalen, protolen, hdrlen);
2233 if (protolen > ETH_DATA_LEN)
2234 return datalen; /* Value in proto field not a len, no checks possible */
2236 protolen += hdrlen;
2237 /* consistency checks: */
2238 if (datalen > ETH_ZLEN) {
2239 if (datalen >= protolen) {
2240 /* more data on wire than in 802 header, trim off
2241 * the additional data.
2242 */
2243 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2244 dev->name, protolen);
2247 /* less data on wire than mentioned in header.
2248 * Discard the packet.
2250 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2255 /* short packet. Accept only if 802 values are also short */
2256 if (protolen > ETH_ZLEN) {
2257 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2261 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2262 dev->name, datalen);
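/*
 * Hedged summary of the decision above, as a standalone model: given the
 * on-wire length and the 802 length field (header length already added in),
 * the outcome is trim, accept, or discard. getlen_model is an illustrative
 * name, and eth_zlen plays the role of ETH_ZLEN:
 */
static inline int getlen_model(int datalen, int protolen_with_hdr,
			       int eth_zlen)
{
	if (datalen > eth_zlen)			/* normal-sized frame */
		return (datalen >= protolen_with_hdr)
			? protolen_with_hdr	/* trim trailing padding */
			: -1;			/* header claims more than arrived */
	/* runt frame: valid only if the header agrees it is short */
	return (protolen_with_hdr > eth_zlen) ? -1 : datalen;
}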
2267 static int nv_rx_process(struct net_device *dev, int limit)
2269 struct fe_priv *np = netdev_priv(dev);
2271 u32 rx_processed_cnt = 0;
2272 struct sk_buff *skb;
2275 while((np->get_rx.orig != np->put_rx.orig) &&
2276 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2277 (rx_processed_cnt++ < limit)) {
2279 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2283 * the packet is for us - immediately tear down the pci mapping.
2284 * TODO: check if a prefetch of the first cacheline improves
2285 * the performance.
2286 */
2287 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2288 np->get_rx_ctx->dma_len,
2289 PCI_DMA_FROMDEVICE);
2290 skb = np->get_rx_ctx->skb;
2291 np->get_rx_ctx->skb = NULL;
2295 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2296 for (j=0; j<64; j++) {
2298 dprintk("\n%03x:", j);
2299 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2303 /* look at what we actually got: */
2304 if (np->desc_ver == DESC_VER_1) {
2305 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2306 len = flags & LEN_MASK_V1;
2307 if (unlikely(flags & NV_RX_ERROR)) {
2308 if (flags & NV_RX_ERROR4) {
2309 len = nv_getlen(dev, skb->data, len);
2311 np->stats.rx_errors++;
2316 /* framing errors are soft errors */
2317 else if (flags & NV_RX_FRAMINGERR) {
2318 if (flags & NV_RX_SUBSTRACT1) {
2322 /* the rest are hard errors */
2324 if (flags & NV_RX_MISSEDFRAME)
2325 np->stats.rx_missed_errors++;
2326 if (flags & NV_RX_CRCERR)
2327 np->stats.rx_crc_errors++;
2328 if (flags & NV_RX_OVERFLOW)
2329 np->stats.rx_over_errors++;
2330 np->stats.rx_errors++;
2340 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2341 len = flags & LEN_MASK_V2;
2342 if (unlikely(flags & NV_RX2_ERROR)) {
2343 if (flags & NV_RX2_ERROR4) {
2344 len = nv_getlen(dev, skb->data, len);
2346 np->stats.rx_errors++;
2351 /* framing errors are soft errors */
2352 else if (flags & NV_RX2_FRAMINGERR) {
2353 if (flags & NV_RX2_SUBSTRACT1) {
2357 /* the rest are hard errors */
2359 if (flags & NV_RX2_CRCERR)
2360 np->stats.rx_crc_errors++;
2361 if (flags & NV_RX2_OVERFLOW)
2362 np->stats.rx_over_errors++;
2363 np->stats.rx_errors++;
2368 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) { /* ip and tcp */
2369 skb->ip_summed = CHECKSUM_UNNECESSARY;
2371 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2372 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2373 skb->ip_summed = CHECKSUM_UNNECESSARY;
2381 /* got a valid packet - forward it to the network core */
2383 skb->protocol = eth_type_trans(skb, dev);
2384 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2385 dev->name, len, skb->protocol);
2386 #ifdef CONFIG_FORCEDETH_NAPI
2387 netif_receive_skb(skb);
2391 dev->last_rx = jiffies;
2392 np->stats.rx_packets++;
2393 np->stats.rx_bytes += len;
2395 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2396 np->get_rx.orig = np->first_rx.orig;
2397 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2398 np->get_rx_ctx = np->first_rx_ctx;
2401 return rx_processed_cnt;
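/*
 * Sketch: the checksum acceptance above compares the masked flag field
 * against three distinct "hardware verified" encodings. A standalone model
 * that takes the NV_RX2_* constants as parameters rather than assuming
 * their values (rx_csum_ok is an illustrative name):
 */
static inline int rx_csum_ok(u32 flags, u32 mask, u32 ok1, u32 ok2, u32 ok3)
{
	u32 v = flags & mask;

	/* ok2 covers ip+tcp; ok1/ok3 cover the remaining verified cases */
	return v == ok1 || v == ok2 || v == ok3;
}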
2404 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2406 struct fe_priv *np = netdev_priv(dev);
2409 u32 rx_processed_cnt = 0;
2410 struct sk_buff *skb;
2413 while((np->get_rx.ex != np->put_rx.ex) &&
2414 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2415 (rx_processed_cnt++ < limit)) {
2417 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2421 * the packet is for us - immediately tear down the pci mapping.
2422 * TODO: check if a prefetch of the first cacheline improves
2423 * the performance.
2424 */
2425 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2426 np->get_rx_ctx->dma_len,
2427 PCI_DMA_FROMDEVICE);
2428 skb = np->get_rx_ctx->skb;
2429 np->get_rx_ctx->skb = NULL;
2433 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2434 for (j=0; j<64; j++) {
2436 dprintk("\n%03x:", j);
2437 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2441 /* look at what we actually got: */
2442 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2443 len = flags & LEN_MASK_V2;
2444 if (unlikely(flags & NV_RX2_ERROR)) {
2445 if (flags & NV_RX2_ERROR4) {
2446 len = nv_getlen(dev, skb->data, len);
2452 /* framing errors are soft errors */
2453 else if (flags & NV_RX2_FRAMINGERR) {
2454 if (flags & NV_RX2_SUBSTRACT1) {
2458 /* the rest are hard errors */
2465 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) { /* ip and tcp */
2466 skb->ip_summed = CHECKSUM_UNNECESSARY;
2468 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2469 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2470 skb->ip_summed = CHECKSUM_UNNECESSARY;
2474 /* got a valid packet - forward it to the network core */
2476 skb->protocol = eth_type_trans(skb, dev);
2477 prefetch(skb->data);
2479 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2480 dev->name, len, skb->protocol);
2482 if (likely(!np->vlangrp)) {
2483 #ifdef CONFIG_FORCEDETH_NAPI
2484 netif_receive_skb(skb);
2489 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2490 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2491 #ifdef CONFIG_FORCEDETH_NAPI
2492 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2493 vlanflags & NV_RX3_VLAN_TAG_MASK);
2495 vlan_hwaccel_rx(skb, np->vlangrp,
2496 vlanflags & NV_RX3_VLAN_TAG_MASK);
2499 #ifdef CONFIG_FORCEDETH_NAPI
2500 netif_receive_skb(skb);
2507 dev->last_rx = jiffies;
2508 np->stats.rx_packets++;
2509 np->stats.rx_bytes += len;
2514 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2515 np->get_rx.ex = np->first_rx.ex;
2516 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2517 np->get_rx_ctx = np->first_rx_ctx;
2520 return rx_processed_cnt;
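/*
 * Sketch: on the version-3 ring the low buffer word of a completed rx
 * descriptor doubles as the vlan status, as read above. A hedged model of
 * the extraction, with the bit constants passed in (rx_vlan_tag is an
 * illustrative name):
 */
static inline int rx_vlan_tag(u32 vlanflags, u32 present_bit, u32 tag_mask)
{
	/* returns the tag, or -1 when no tag was present */
	return (vlanflags & present_bit) ? (int)(vlanflags & tag_mask) : -1;
}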
2523 static void set_bufsize(struct net_device *dev)
2525 struct fe_priv *np = netdev_priv(dev);
2527 if (dev->mtu <= ETH_DATA_LEN)
2528 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2530 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2534 * nv_change_mtu: dev->change_mtu function
2535 * Called with dev_base_lock held for read.
2537 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2539 struct fe_priv *np = netdev_priv(dev);
2542 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2548 /* return early if the buffer sizes will not change */
2549 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2551 if (old_mtu == new_mtu)
2554 /* synchronized against open : rtnl_lock() held by caller */
2555 if (netif_running(dev)) {
2556 u8 __iomem *base = get_hwbase(dev);
2558 * It seems that the nic preloads valid ring entries into an
2559 * internal buffer. The procedure for flushing everything is
2560 * guessed; there is probably a simpler approach.
2561 * Changing the MTU is a rare event, so it shouldn't matter.
2563 nv_disable_irq(dev);
2564 netif_tx_lock_bh(dev);
2565 spin_lock(&np->lock);
2570 /* drain rx queue */
2573 /* reinit driver view of the rx queue */
2575 if (nv_init_ring(dev)) {
2576 if (!np->in_shutdown)
2577 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2579 /* reinit nic view of the rx queue */
2580 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2581 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2582 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2583 base + NvRegRingSizes);
2585 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2588 /* restart rx engine */
2591 spin_unlock(&np->lock);
2592 netif_tx_unlock_bh(dev);
2598 static void nv_copy_mac_to_hw(struct net_device *dev)
2600 u8 __iomem *base = get_hwbase(dev);
2603 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2604 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2605 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2607 writel(mac[0], base + NvRegMacAddrA);
2608 writel(mac[1], base + NvRegMacAddrB);
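/*
 * Sketch: the two address registers pack the six MAC bytes low-byte-first,
 * byte 0 in the least significant byte of NvRegMacAddrA, exactly as built
 * above. Standalone model (pack_mac is an illustrative name):
 */
static inline void pack_mac(const unsigned char mac[6], u32 *reg_a, u32 *reg_b)
{
	*reg_a = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
		 ((u32)mac[3] << 24);
	*reg_b = mac[4] | (mac[5] << 8);
}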
2612 * nv_set_mac_address: dev->set_mac_address function
2613 * Called with rtnl_lock() held.
2615 static int nv_set_mac_address(struct net_device *dev, void *addr)
2617 struct fe_priv *np = netdev_priv(dev);
2618 struct sockaddr *macaddr = (struct sockaddr*)addr;
2620 if (!is_valid_ether_addr(macaddr->sa_data))
2621 return -EADDRNOTAVAIL;
2623 /* synchronized against open : rtnl_lock() held by caller */
2624 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2626 if (netif_running(dev)) {
2627 netif_tx_lock_bh(dev);
2628 spin_lock_irq(&np->lock);
2630 /* stop rx engine */
2633 /* set mac address */
2634 nv_copy_mac_to_hw(dev);
2636 /* restart rx engine */
2638 spin_unlock_irq(&np->lock);
2639 netif_tx_unlock_bh(dev);
2641 nv_copy_mac_to_hw(dev);
2647 * nv_set_multicast: dev->set_multicast function
2648 * Called with netif_tx_lock held.
2650 static void nv_set_multicast(struct net_device *dev)
2652 struct fe_priv *np = netdev_priv(dev);
2653 u8 __iomem *base = get_hwbase(dev);
2656 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2658 memset(addr, 0, sizeof(addr));
2659 memset(mask, 0, sizeof(mask));
2661 if (dev->flags & IFF_PROMISC) {
2662 pff |= NVREG_PFF_PROMISC;
2664 pff |= NVREG_PFF_MYADDR;
2666 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
2670 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2671 if (dev->flags & IFF_ALLMULTI) {
2672 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2674 struct dev_mc_list *walk;
2676 walk = dev->mc_list;
2677 while (walk != NULL) {
2679 a = le32_to_cpu(*(u32 *) walk->dmi_addr);
2680 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
2688 addr[0] = alwaysOn[0];
2689 addr[1] = alwaysOn[1];
2690 mask[0] = alwaysOn[0] | alwaysOff[0];
2691 mask[1] = alwaysOn[1] | alwaysOff[1];
2694 addr[0] |= NVREG_MCASTADDRA_FORCE;
2695 pff |= NVREG_PFF_ALWAYS;
2696 spin_lock_irq(&np->lock);
2698 writel(addr[0], base + NvRegMulticastAddrA);
2699 writel(addr[1], base + NvRegMulticastAddrB);
2700 writel(mask[0], base + NvRegMulticastMaskA);
2701 writel(mask[1], base + NvRegMulticastMaskB);
2702 writel(pff, base + NvRegPacketFilterFlags);
2703 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
2706 spin_unlock_irq(&np->lock);
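/*
 * Sketch of the filter computation above (standalone model): the multicast
 * list is reduced to the bits that are 1 in every address (alwaysOn) and
 * the bits that are 0 in every address (alwaysOff). addr = alwaysOn and
 * mask = alwaysOn | alwaysOff then match every listed address, at the cost
 * of some false positives the stack filters out later. build_mc_filter is
 * an illustrative name operating on 32-bit words:
 */
static void build_mc_filter(const u32 *addrs, int n, u32 *addr, u32 *mask)
{
	u32 always_on = 0xffffffff, always_off = 0xffffffff;
	int i;

	for (i = 0; i < n; i++) {
		always_on &= addrs[i];		/* bits set in all addresses */
		always_off &= ~addrs[i];	/* bits clear in all addresses */
	}
	*addr = always_on;
	*mask = always_on | always_off;		/* don't-care bits stay 0 */
}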
2709 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2711 struct fe_priv *np = netdev_priv(dev);
2712 u8 __iomem *base = get_hwbase(dev);
2714 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
2716 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
2717 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
2718 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
2719 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
2720 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2722 writel(pff, base + NvRegPacketFilterFlags);
2725 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2726 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2727 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2728 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
2729 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2730 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2732 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
2733 writel(regmisc, base + NvRegMisc1);
2739 * nv_update_linkspeed: Setup the MAC according to the link partner
2740 * @dev: Network device to be configured
2742 * The function queries the PHY and checks if there is a link partner.
2743 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
2744 * set to 10 MBit HD.
2746 * The function returns 0 if there is no link partner and 1 if there is
2747 * a good link partner.
2749 static int nv_update_linkspeed(struct net_device *dev)
2751 struct fe_priv *np = netdev_priv(dev);
2752 u8 __iomem *base = get_hwbase(dev);
2755 int adv_lpa, adv_pause, lpa_pause;
2756 int newls = np->linkspeed;
2757 int newdup = np->duplex;
2760 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
2762 /* BMSR_LSTATUS is latched, read it twice:
2763 * we want the current value.
2765 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2766 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2768 if (!(mii_status & BMSR_LSTATUS)) {
2769 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
2771 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2777 if (np->autoneg == 0) {
2778 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
2779 dev->name, np->fixed_mode);
2780 if (np->fixed_mode & LPA_100FULL) {
2781 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2783 } else if (np->fixed_mode & LPA_100HALF) {
2784 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2786 } else if (np->fixed_mode & LPA_10FULL) {
2787 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2790 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2796 /* check auto negotiation is complete */
2797 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
2798 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
2799 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2802 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
2806 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2807 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
2808 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
2809 dev->name, adv, lpa);
2812 if (np->gigabit == PHY_GIGABIT) {
2813 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2814 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
2816 if ((control_1000 & ADVERTISE_1000FULL) &&
2817 (status_1000 & LPA_1000FULL)) {
2818 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
2820 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
2826 /* FIXME: handle parallel detection properly */
2827 adv_lpa = lpa & adv;
2828 if (adv_lpa & LPA_100FULL) {
2829 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2831 } else if (adv_lpa & LPA_100HALF) {
2832 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2834 } else if (adv_lpa & LPA_10FULL) {
2835 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2837 } else if (adv_lpa & LPA_10HALF) {
2838 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2841 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
2842 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2847 if (np->duplex == newdup && np->linkspeed == newls)
2850 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
2851 dev->name, np->linkspeed, np->duplex, newls, newdup);
2853 np->duplex = newdup;
2854 np->linkspeed = newls;
2856 if (np->gigabit == PHY_GIGABIT) {
2857 phyreg = readl(base + NvRegRandomSeed);
2858 phyreg &= ~(0x3FF00);
2859 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
2860 phyreg |= NVREG_RNDSEED_FORCE3;
2861 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
2862 phyreg |= NVREG_RNDSEED_FORCE2;
2863 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
2864 phyreg |= NVREG_RNDSEED_FORCE;
2865 writel(phyreg, base + NvRegRandomSeed);
2868 phyreg = readl(base + NvRegPhyInterface);
2869 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
2870 if (np->duplex == 0)
2872 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
2874 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2876 writel(phyreg, base + NvRegPhyInterface);
2878 if (phyreg & PHY_RGMII) {
2879 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2880 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
2882 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
2884 txreg = NVREG_TX_DEFERRAL_DEFAULT;
2886 writel(txreg, base + NvRegTxDeferral);
2888 if (np->desc_ver == DESC_VER_1) {
2889 txreg = NVREG_TX_WM_DESC1_DEFAULT;
2891 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2892 txreg = NVREG_TX_WM_DESC2_3_1000;
2894 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
2896 writel(txreg, base + NvRegTxWatermark);
2898 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
2901 writel(np->linkspeed, base + NvRegLinkSpeed);
2905 /* setup pause frame */
2906 if (np->duplex != 0) {
2907 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
2908 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
2909 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
2911 switch (adv_pause) {
2912 case ADVERTISE_PAUSE_CAP:
2913 if (lpa_pause & LPA_PAUSE_CAP) {
2914 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2915 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2916 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2919 case ADVERTISE_PAUSE_ASYM:
2920 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
2922 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2925 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
2926 if (lpa_pause & LPA_PAUSE_CAP)
2928 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2929 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2930 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2932 if (lpa_pause == LPA_PAUSE_ASYM)
2934 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2939 pause_flags = np->pause_flags;
2942 nv_update_pause(dev, pause_flags);
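/*
 * Hedged model of the pause resolution switch above; it follows the 802.3
 * symmetric/asymmetric flow-control truth table. cap/asym stand for the
 * PAUSE and ASYM_PAUSE ability bits on each side, tx_requested mirrors
 * NV_PAUSEFRAME_TX_REQ, and resolve_pause is an illustrative name:
 */
static void resolve_pause(int adv_cap, int adv_asym,
			  int lpa_cap, int lpa_asym,
			  int tx_requested, int *rx_en, int *tx_en)
{
	*rx_en = *tx_en = 0;
	if (adv_cap && !adv_asym) {
		if (lpa_cap) {			/* both symmetric-capable */
			*rx_en = 1;
			if (tx_requested)
				*tx_en = 1;
		}
	} else if (!adv_cap && adv_asym) {
		if (lpa_cap && lpa_asym)	/* we pause them, never honor */
			*tx_en = 1;
	} else if (adv_cap && adv_asym) {
		if (lpa_cap) {
			*rx_en = 1;
			if (tx_requested)
				*tx_en = 1;
		} else if (lpa_asym) {		/* partner asym-only: rx only */
			*rx_en = 1;
		}
	}
}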
2947 static void nv_linkchange(struct net_device *dev)
2949 if (nv_update_linkspeed(dev)) {
2950 if (!netif_carrier_ok(dev)) {
2951 netif_carrier_on(dev);
2952 printk(KERN_INFO "%s: link up.\n", dev->name);
2956 if (netif_carrier_ok(dev)) {
2957 netif_carrier_off(dev);
2958 printk(KERN_INFO "%s: link down.\n", dev->name);
2964 static void nv_link_irq(struct net_device *dev)
2966 u8 __iomem *base = get_hwbase(dev);
2969 miistat = readl(base + NvRegMIIStatus);
2970 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
2971 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
2973 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
2975 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
2978 static irqreturn_t nv_nic_irq(int foo, void *data)
2980 struct net_device *dev = (struct net_device *) data;
2981 struct fe_priv *np = netdev_priv(dev);
2982 u8 __iomem *base = get_hwbase(dev);
2986 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
2989 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2990 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2991 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2993 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2994 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2996 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2997 if (!(events & np->irqmask))
3000 spin_lock(&np->lock);
3002 spin_unlock(&np->lock);
3004 #ifdef CONFIG_FORCEDETH_NAPI
3005 if (events & NVREG_IRQ_RX_ALL) {
3006 netif_rx_schedule(dev, &np->napi);
3008 /* Disable further receive irqs */
3009 spin_lock(&np->lock);
3010 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3012 if (np->msi_flags & NV_MSI_X_ENABLED)
3013 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3015 writel(np->irqmask, base + NvRegIrqMask);
3016 spin_unlock(&np->lock);
3019 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
3020 if (unlikely(nv_alloc_rx(dev))) {
3021 spin_lock(&np->lock);
3022 if (!np->in_shutdown)
3023 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3024 spin_unlock(&np->lock);
3028 if (unlikely(events & NVREG_IRQ_LINK)) {
3029 spin_lock(&np->lock);
3031 spin_unlock(&np->lock);
3033 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3034 spin_lock(&np->lock);
3036 spin_unlock(&np->lock);
3037 np->link_timeout = jiffies + LINK_TIMEOUT;
3039 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3040 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3043 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3044 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3047 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3048 spin_lock(&np->lock);
3049 /* disable interrupts on the nic */
3050 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3051 writel(0, base + NvRegIrqMask);
3053 writel(np->irqmask, base + NvRegIrqMask);
3056 if (!np->in_shutdown) {
3057 np->nic_poll_irq = np->irqmask;
3058 np->recover_error = 1;
3059 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3061 spin_unlock(&np->lock);
3064 if (unlikely(i > max_interrupt_work)) {
3065 spin_lock(&np->lock);
3066 /* disable interrupts on the nic */
3067 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3068 writel(0, base + NvRegIrqMask);
3070 writel(np->irqmask, base + NvRegIrqMask);
3073 if (!np->in_shutdown) {
3074 np->nic_poll_irq = np->irqmask;
3075 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3077 spin_unlock(&np->lock);
3078 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3083 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3085 return IRQ_RETVAL(i);
3089 * All _optimized functions are used to help increase performance
3090 * (reduce CPU load and increase throughput). They use descriptor version 3,
3091 * compiler directives, and reduced memory accesses.
3093 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3095 struct net_device *dev = (struct net_device *) data;
3096 struct fe_priv *np = netdev_priv(dev);
3097 u8 __iomem *base = get_hwbase(dev);
3101 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3104 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3105 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3106 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3108 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3109 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3111 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3112 if (!(events & np->irqmask))
3115 spin_lock(&np->lock);
3116 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3117 spin_unlock(&np->lock);
3119 #ifdef CONFIG_FORCEDETH_NAPI
3120 if (events & NVREG_IRQ_RX_ALL) {
3121 netif_rx_schedule(dev, &np->napi);
3123 /* Disable further receive irqs */
3124 spin_lock(&np->lock);
3125 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3127 if (np->msi_flags & NV_MSI_X_ENABLED)
3128 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3130 writel(np->irqmask, base + NvRegIrqMask);
3131 spin_unlock(&np->lock);
3134 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3135 if (unlikely(nv_alloc_rx_optimized(dev))) {
3136 spin_lock(&np->lock);
3137 if (!np->in_shutdown)
3138 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3139 spin_unlock(&np->lock);
3143 if (unlikely(events & NVREG_IRQ_LINK)) {
3144 spin_lock(&np->lock);
3146 spin_unlock(&np->lock);
3148 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3149 spin_lock(&np->lock);
3151 spin_unlock(&np->lock);
3152 np->link_timeout = jiffies + LINK_TIMEOUT;
3154 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3155 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3158 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3159 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3162 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3163 spin_lock(&np->lock);
3164 /* disable interrupts on the nic */
3165 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3166 writel(0, base + NvRegIrqMask);
3168 writel(np->irqmask, base + NvRegIrqMask);
3171 if (!np->in_shutdown) {
3172 np->nic_poll_irq = np->irqmask;
3173 np->recover_error = 1;
3174 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3176 spin_unlock(&np->lock);
3180 if (unlikely(i > max_interrupt_work)) {
3181 spin_lock(&np->lock);
3182 /* disable interrupts on the nic */
3183 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3184 writel(0, base + NvRegIrqMask);
3186 writel(np->irqmask, base + NvRegIrqMask);
3189 if (!np->in_shutdown) {
3190 np->nic_poll_irq = np->irqmask;
3191 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3193 spin_unlock(&np->lock);
3194 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3199 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3201 return IRQ_RETVAL(i);
3204 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3206 struct net_device *dev = (struct net_device *) data;
3207 struct fe_priv *np = netdev_priv(dev);
3208 u8 __iomem *base = get_hwbase(dev);
3211 unsigned long flags;
3213 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3216 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3217 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3218 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3219 if (!(events & np->irqmask))
3222 spin_lock_irqsave(&np->lock, flags);
3223 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3224 spin_unlock_irqrestore(&np->lock, flags);
3226 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3227 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3230 if (unlikely(i > max_interrupt_work)) {
3231 spin_lock_irqsave(&np->lock, flags);
3232 /* disable interrupts on the nic */
3233 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3236 if (!np->in_shutdown) {
3237 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3238 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3240 spin_unlock_irqrestore(&np->lock, flags);
3241 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3246 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3248 return IRQ_RETVAL(i);
3251 #ifdef CONFIG_FORCEDETH_NAPI
3252 static int nv_napi_poll(struct napi_struct *napi, int budget)
3254 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3255 struct net_device *dev = np->dev;
3256 u8 __iomem *base = get_hwbase(dev);
3257 unsigned long flags;
3260 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3261 pkts = nv_rx_process(dev, budget);
3262 retcode = nv_alloc_rx(dev);
3264 pkts = nv_rx_process_optimized(dev, budget);
3265 retcode = nv_alloc_rx_optimized(dev);
3269 spin_lock_irqsave(&np->lock, flags);
3270 if (!np->in_shutdown)
3271 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3272 spin_unlock_irqrestore(&np->lock, flags);
3275 if (pkts < budget) {
3276 /* re-enable receive interrupts */
3277 spin_lock_irqsave(&np->lock, flags);
3279 __netif_rx_complete(dev, napi);
3281 np->irqmask |= NVREG_IRQ_RX_ALL;
3282 if (np->msi_flags & NV_MSI_X_ENABLED)
3283 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3285 writel(np->irqmask, base + NvRegIrqMask);
3287 spin_unlock_irqrestore(&np->lock, flags);
3293 #ifdef CONFIG_FORCEDETH_NAPI
3294 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3296 struct net_device *dev = (struct net_device *) data;
3297 struct fe_priv *np = netdev_priv(dev);
3298 u8 __iomem *base = get_hwbase(dev);
3301 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3302 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3305 netif_rx_schedule(dev, &np->napi);
3306 /* disable receive interrupts on the nic */
3307 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3313 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3315 struct net_device *dev = (struct net_device *) data;
3316 struct fe_priv *np = netdev_priv(dev);
3317 u8 __iomem *base = get_hwbase(dev);
3320 unsigned long flags;
3322 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3325 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3326 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3327 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3328 if (!(events & np->irqmask))
3331 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3332 if (unlikely(nv_alloc_rx_optimized(dev))) {
3333 spin_lock_irqsave(&np->lock, flags);
3334 if (!np->in_shutdown)
3335 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3336 spin_unlock_irqrestore(&np->lock, flags);
3340 if (unlikely(i > max_interrupt_work)) {
3341 spin_lock_irqsave(&np->lock, flags);
3342 /* disable interrupts on the nic */
3343 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3346 if (!np->in_shutdown) {
3347 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3348 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3350 spin_unlock_irqrestore(&np->lock, flags);
3351 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3355 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3357 return IRQ_RETVAL(i);
3361 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3363 struct net_device *dev = (struct net_device *) data;
3364 struct fe_priv *np = netdev_priv(dev);
3365 u8 __iomem *base = get_hwbase(dev);
3368 unsigned long flags;
3370 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3373 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3374 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3375 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3376 if (!(events & np->irqmask))
3379 /* check tx in case we reached max loop limit in tx isr */
3380 spin_lock_irqsave(&np->lock, flags);
3381 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3382 spin_unlock_irqrestore(&np->lock, flags);
3384 if (events & NVREG_IRQ_LINK) {
3385 spin_lock_irqsave(&np->lock, flags);
3387 spin_unlock_irqrestore(&np->lock, flags);
3389 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3390 spin_lock_irqsave(&np->lock, flags);
3392 spin_unlock_irqrestore(&np->lock, flags);
3393 np->link_timeout = jiffies + LINK_TIMEOUT;
3395 if (events & NVREG_IRQ_RECOVER_ERROR) {
3396 spin_lock_irq(&np->lock);
3397 /* disable interrupts on the nic */
3398 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3401 if (!np->in_shutdown) {
3402 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3403 np->recover_error = 1;
3404 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3406 spin_unlock_irq(&np->lock);
3409 if (events & (NVREG_IRQ_UNKNOWN)) {
3410 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3413 if (unlikely(i > max_interrupt_work)) {
3414 spin_lock_irqsave(&np->lock, flags);
3415 /* disable interrupts on the nic */
3416 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3419 if (!np->in_shutdown) {
3420 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3421 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3423 spin_unlock_irqrestore(&np->lock, flags);
3424 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3429 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3431 return IRQ_RETVAL(i);
3434 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3436 struct net_device *dev = (struct net_device *) data;
3437 struct fe_priv *np = netdev_priv(dev);
3438 u8 __iomem *base = get_hwbase(dev);
3441 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3443 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3444 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3445 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3447 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3448 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3451 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3452 if (!(events & NVREG_IRQ_TIMER))
3453 return IRQ_RETVAL(0);
3455 spin_lock(&np->lock);
3457 spin_unlock(&np->lock);
3459 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3461 return IRQ_RETVAL(1);
3464 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3466 u8 __iomem *base = get_hwbase(dev);
3470 /* Each interrupt bit can be mapped to an MSI-X vector (4 bits per interrupt).
3471 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3472 * the remaining 8 interrupts.
3474 for (i = 0; i < 8; i++) {
3475 if ((irqmask >> i) & 0x1) {
3476 msixmap |= vector << (i << 2);
3479 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3482 for (i = 0; i < 8; i++) {
3483 if ((irqmask >> (i + 8)) & 0x1) {
3484 msixmap |= vector << (i << 2);
3487 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
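/*
 * Sketch: each map register holds eight 4-bit slots, one vector number per
 * interrupt bit, exactly as the loops above build it. Standalone model for
 * one map register (build_msix_map is an illustrative name):
 */
static u32 build_msix_map(u32 vector, u32 irqmask)
{
	u32 msixmap = 0;
	int i;

	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1)
			msixmap |= vector << (i << 2);	/* slot i: bits 4i..4i+3 */
	}
	/* e.g. vector = 2, irqmask = 0x5 -> 0x00000202 */
	return msixmap;
}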
3490 static int nv_request_irq(struct net_device *dev, int intr_test)
3492 struct fe_priv *np = get_nvpriv(dev);
3493 u8 __iomem *base = get_hwbase(dev);
3496 irqreturn_t (*handler)(int foo, void *data);
3499 handler = nv_nic_irq_test;
3501 if (np->desc_ver == DESC_VER_3)
3502 handler = nv_nic_irq_optimized;
3504 handler = nv_nic_irq;
3507 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3508 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3509 np->msi_x_entry[i].entry = i;
3511 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3512 np->msi_flags |= NV_MSI_X_ENABLED;
3513 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3514 /* Request irq for rx handling */
3515 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
3516 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3517 pci_disable_msix(np->pci_dev);
3518 np->msi_flags &= ~NV_MSI_X_ENABLED;
3521 /* Request irq for tx handling */
3522 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
3523 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3524 pci_disable_msix(np->pci_dev);
3525 np->msi_flags &= ~NV_MSI_X_ENABLED;
3528 /* Request irq for link and timer handling */
3529 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
3530 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3531 pci_disable_msix(np->pci_dev);
3532 np->msi_flags &= ~NV_MSI_X_ENABLED;
3535 /* map interrupts to their respective vector */
3536 writel(0, base + NvRegMSIXMap0);
3537 writel(0, base + NvRegMSIXMap1);
3538 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3539 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3540 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3542 /* Request irq for all interrupts */
3543 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3544 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3545 pci_disable_msix(np->pci_dev);
3546 np->msi_flags &= ~NV_MSI_X_ENABLED;
3550 /* map interrupts to vector 0 */
3551 writel(0, base + NvRegMSIXMap0);
3552 writel(0, base + NvRegMSIXMap1);
3556 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3557 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3558 np->msi_flags |= NV_MSI_ENABLED;
3559 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3560 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3561 pci_disable_msi(np->pci_dev);
3562 np->msi_flags &= ~NV_MSI_ENABLED;
3566 /* map interrupts to vector 0 */
3567 writel(0, base + NvRegMSIMap0);
3568 writel(0, base + NvRegMSIMap1);
3569 /* enable msi vector 0 */
3570 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3574 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3581 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3583 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3588 static void nv_free_irq(struct net_device *dev)
3590 struct fe_priv *np = get_nvpriv(dev);
3593 if (np->msi_flags & NV_MSI_X_ENABLED) {
3594 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3595 free_irq(np->msi_x_entry[i].vector, dev);
3597 pci_disable_msix(np->pci_dev);
3598 np->msi_flags &= ~NV_MSI_X_ENABLED;
3600 free_irq(np->pci_dev->irq, dev);
3601 if (np->msi_flags & NV_MSI_ENABLED) {
3602 pci_disable_msi(np->pci_dev);
3603 np->msi_flags &= ~NV_MSI_ENABLED;
3608 static void nv_do_nic_poll(unsigned long data)
3610 struct net_device *dev = (struct net_device *) data;
3611 struct fe_priv *np = netdev_priv(dev);
3612 u8 __iomem *base = get_hwbase(dev);
3616 * First disable the irq(s) and then
3617 * reenable interrupts on the nic; we have to do this before calling
3618 * nv_nic_irq because that may decide to mask them again.
3621 if (!using_multi_irqs(dev)) {
3622 if (np->msi_flags & NV_MSI_X_ENABLED)
3623 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3625 disable_irq_lockdep(dev->irq);
3628 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3629 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3630 mask |= NVREG_IRQ_RX_ALL;
3632 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3633 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3634 mask |= NVREG_IRQ_TX_ALL;
3636 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3637 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3638 mask |= NVREG_IRQ_OTHER;
3641 np->nic_poll_irq = 0;
3643 if (np->recover_error) {
3644 np->recover_error = 0;
3645 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
3646 if (netif_running(dev)) {
3647 netif_tx_lock_bh(dev);
3648 spin_lock(&np->lock);
3653 /* drain rx queue */
3656 /* reinit driver view of the rx queue */
3658 if (nv_init_ring(dev)) {
3659 if (!np->in_shutdown)
3660 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3662 /* reinit nic view of the rx queue */
3663 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3664 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3665 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3666 base + NvRegRingSizes);
3668 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3671 /* restart rx engine */
3674 spin_unlock(&np->lock);
3675 netif_tx_unlock_bh(dev);
3679 /* FIXME: Do we need synchronize_irq(dev->irq) here? */
3681 writel(mask, base + NvRegIrqMask);
3684 if (!using_multi_irqs(dev)) {
3685 if (np->desc_ver == DESC_VER_3)
3686 nv_nic_irq_optimized(0, dev);
3689 if (np->msi_flags & NV_MSI_X_ENABLED)
3690 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3692 enable_irq_lockdep(dev->irq);
3694 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3695 nv_nic_irq_rx(0, dev);
3696 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3698 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3699 nv_nic_irq_tx(0, dev);
3700 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3702 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3703 nv_nic_irq_other(0, dev);
3704 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3709 #ifdef CONFIG_NET_POLL_CONTROLLER
3710 static void nv_poll_controller(struct net_device *dev)
3712 nv_do_nic_poll((unsigned long) dev);
3716 static void nv_do_stats_poll(unsigned long data)
3718 struct net_device *dev = (struct net_device *) data;
3719 struct fe_priv *np = netdev_priv(dev);
3721 nv_get_hw_stats(dev);
3723 if (!np->in_shutdown)
3724 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
3727 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3729 struct fe_priv *np = netdev_priv(dev);
3730 strcpy(info->driver, "forcedeth");
3731 strcpy(info->version, FORCEDETH_VERSION);
3732 strcpy(info->bus_info, pci_name(np->pci_dev));
3735 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3737 struct fe_priv *np = netdev_priv(dev);
3738 wolinfo->supported = WAKE_MAGIC;
3740 spin_lock_irq(&np->lock);
3742 wolinfo->wolopts = WAKE_MAGIC;
3743 spin_unlock_irq(&np->lock);
3746 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3748 struct fe_priv *np = netdev_priv(dev);
3749 u8 __iomem *base = get_hwbase(dev);
3752 if (wolinfo->wolopts == 0) {
3754 } else if (wolinfo->wolopts & WAKE_MAGIC) {
3756 flags = NVREG_WAKEUPFLAGS_ENABLE;
3758 if (netif_running(dev)) {
3759 spin_lock_irq(&np->lock);
3760 writel(flags, base + NvRegWakeUpFlags);
3761 spin_unlock_irq(&np->lock);
3766 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3768 struct fe_priv *np = netdev_priv(dev);
3771 spin_lock_irq(&np->lock);
3772 ecmd->port = PORT_MII;
3773 if (!netif_running(dev)) {
3774 /* We do not track link speed / duplex setting if the
3775 * interface is disabled. Force a link check */
3776 if (nv_update_linkspeed(dev)) {
3777 if (!netif_carrier_ok(dev))
3778 netif_carrier_on(dev);
3780 if (netif_carrier_ok(dev))
3781 netif_carrier_off(dev);
3785 if (netif_carrier_ok(dev)) {
3786 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3787 case NVREG_LINKSPEED_10:
3788 ecmd->speed = SPEED_10;
3790 case NVREG_LINKSPEED_100:
3791 ecmd->speed = SPEED_100;
3793 case NVREG_LINKSPEED_1000:
3794 ecmd->speed = SPEED_1000;
3797 ecmd->duplex = DUPLEX_HALF;
3799 ecmd->duplex = DUPLEX_FULL;
3805 ecmd->autoneg = np->autoneg;
3807 ecmd->advertising = ADVERTISED_MII;
3809 ecmd->advertising |= ADVERTISED_Autoneg;
3810 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3811 if (adv & ADVERTISE_10HALF)
3812 ecmd->advertising |= ADVERTISED_10baseT_Half;
3813 if (adv & ADVERTISE_10FULL)
3814 ecmd->advertising |= ADVERTISED_10baseT_Full;
3815 if (adv & ADVERTISE_100HALF)
3816 ecmd->advertising |= ADVERTISED_100baseT_Half;
3817 if (adv & ADVERTISE_100FULL)
3818 ecmd->advertising |= ADVERTISED_100baseT_Full;
3819 if (np->gigabit == PHY_GIGABIT) {
3820 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3821 if (adv & ADVERTISE_1000FULL)
3822 ecmd->advertising |= ADVERTISED_1000baseT_Full;
3825 ecmd->supported = (SUPPORTED_Autoneg |
3826 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
3827 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
3829 if (np->gigabit == PHY_GIGABIT)
3830 ecmd->supported |= SUPPORTED_1000baseT_Full;
3832 ecmd->phy_address = np->phyaddr;
3833 ecmd->transceiver = XCVR_EXTERNAL;
3835 /* ignore maxtxpkt, maxrxpkt for now */
3836 spin_unlock_irq(&np->lock);
3840 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3842 struct fe_priv *np = netdev_priv(dev);
3844 if (ecmd->port != PORT_MII)
3846 if (ecmd->transceiver != XCVR_EXTERNAL)
3848 if (ecmd->phy_address != np->phyaddr) {
3849 /* TODO: support switching between multiple phys. Should be
3850 * trivial, but not enabled due to lack of test hardware. */
3853 if (ecmd->autoneg == AUTONEG_ENABLE) {
3856 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3857 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3858 if (np->gigabit == PHY_GIGABIT)
3859 mask |= ADVERTISED_1000baseT_Full;
3861 if ((ecmd->advertising & mask) == 0)
3864 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
3865 /* Note: autonegotiation disabled, speed 1000 intentionally
3866 * forbidden - no one should need that. */
3868 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
3870 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
3876 netif_carrier_off(dev);
3877 if (netif_running(dev)) {
3878 nv_disable_irq(dev);
3879 netif_tx_lock_bh(dev);
3880 spin_lock(&np->lock);
3884 spin_unlock(&np->lock);
3885 netif_tx_unlock_bh(dev);
3888 if (ecmd->autoneg == AUTONEG_ENABLE) {
3893 /* advertise only what has been requested */
3894 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3895 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3896 if (ecmd->advertising & ADVERTISED_10baseT_Half)
3897 adv |= ADVERTISE_10HALF;
3898 if (ecmd->advertising & ADVERTISED_10baseT_Full)
3899 adv |= ADVERTISE_10FULL;
3900 if (ecmd->advertising & ADVERTISED_100baseT_Half)
3901 adv |= ADVERTISE_100HALF;
3902 if (ecmd->advertising & ADVERTISED_100baseT_Full)
3903 adv |= ADVERTISE_100FULL;
3904 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
3905 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3906 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3907 adv |= ADVERTISE_PAUSE_ASYM;
3908 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3910 if (np->gigabit == PHY_GIGABIT) {
3911 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3912 adv &= ~ADVERTISE_1000FULL;
3913 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
3914 adv |= ADVERTISE_1000FULL;
3915 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3918 if (netif_running(dev))
3919 printk(KERN_INFO "%s: link down.\n", dev->name);
3920 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3921 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3922 bmcr |= BMCR_ANENABLE;
3923 /* reset the phy in order for settings to stick,
3924 * and cause autoneg to start */
3925 if (phy_reset(dev, bmcr)) {
3926 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3930 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3931 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3938 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3939 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3940 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
3941 adv |= ADVERTISE_10HALF;
3942 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
3943 adv |= ADVERTISE_10FULL;
3944 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
3945 adv |= ADVERTISE_100HALF;
3946 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
3947 adv |= ADVERTISE_100FULL;
3948 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
3949 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
3950 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3951 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3953 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
3954 adv |= ADVERTISE_PAUSE_ASYM;
3955 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3957 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3958 np->fixed_mode = adv;
3960 if (np->gigabit == PHY_GIGABIT) {
3961 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3962 adv &= ~ADVERTISE_1000FULL;
3963 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3966 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3967 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
3968 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
3969 bmcr |= BMCR_FULLDPLX;
3970 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
3971 bmcr |= BMCR_SPEED100;
3972 if (np->phy_oui == PHY_OUI_MARVELL) {
3973 /* reset the phy in order for forced mode settings to stick */
3974 if (phy_reset(dev, bmcr)) {
3975 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3979 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3980 if (netif_running(dev)) {
3981 /* Wait a bit and then reconfigure the nic. */
3988 if (netif_running(dev)) {
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* "<", not "<=": iterating one u32 past register_size would overflow
	 * the buffer that ethtool sized from nv_get_regs_len() */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
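/*
 * Usage note: userspace fetches this dump with something like
 * "ethtool -d eth0" (the interface name is only an example); the ethtool
 * core allocates the buffer from nv_get_regs_len() before calling
 * nv_get_regs().
 */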
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
		if (netif_running(dev)) {
			nv_start_rx(dev);
			nv_start_tx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}
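/*
 * Usage note (assuming the standard ethtool plumbing): this handler is
 * what runs for "ethtool -r eth0". Setting BMCR_ANENABLE together with
 * BMCR_ANRESTART in the PHY control register is the standard MII way to
 * kick off a fresh autonegotiation cycle.
 */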
static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
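/*
 * Usage note: these limits and current values are what "ethtool -g eth0"
 * prints, and nv_set_ringparam() below is what "ethtool -G eth0 rx N tx M"
 * ends up calling (interface name and sizes are examples only).
 */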
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
				sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
				&ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
				sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
				&ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		kfree(rx_skbuff);
		kfree(tx_skbuff);
		return -ENOMEM;
	}
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map*)rx_skbuff;
	np->tx_skb = (struct nv_skb_map*)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
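/*
 * Note on the NvRegRingSizes writes above (an illustration, using the
 * shift constants defined earlier in this file): both ring lengths are
 * programmed minus one into a single register, e.g. rx_ring_size = 512
 * and tx_ring_size = 256 yield
 *     (511 << NVREG_RINGSZ_RXSHIFT) + (255 << NVREG_RINGSZ_TXSHIFT)
 * so the hardware is told the highest valid descriptor index of each ring.
 */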
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}

static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}
static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}

static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	/* BMSR link status is latched-low: read twice to get the current state */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}
static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);
	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
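/*
 * The loop above is a classic walking-bits register test: XOR the
 * writable mask into the register, write it back, and verify that the
 * toggled bits read back as written before restoring the original value.
 * A mismatch indicates a dead or misdecoded register window.
 */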
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rx(dev);
	nv_start_tx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
		       " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_FROMDEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		/* split the 64-bit address first, then byteswap each half;
		 * byteswapping before the shift breaks on big-endian hosts */
		np->tx_ring.ex[0].bufhigh = cpu_to_le32((u64)test_dma_addr >> 32);
		np->tx_ring.ex[0].buflow = cpu_to_le32(test_dma_addr & 0xFFFFFFFF);
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
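	/*
	 * Layout note for the 64-bit descriptor format above: the DMA
	 * address is carried in two little-endian 32-bit words, bufhigh
	 * holding bits 63:32 and buflow bits 31:0. For example (illustrative
	 * value), address 0x0000000123456780 is stored as bufhigh 0x00000001
	 * and buflow 0x23456780.
	 */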
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR)
			ret = 0;
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_page(np->pci_dev, test_dma_addr,
		       (skb_end_pointer(tx_skb) - tx_skb->data),
		       PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */
	nv_stop_rx(dev);
	nv_stop_tx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rx(dev);
	nv_drain_tx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
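/*
 * Summary of the loopback test above: the MAC is put into internal
 * loopback via NVREG_PFF_LOOPBACK, a single ETH_DATA_LEN packet filled
 * with the repeating byte pattern 0x00..0xff is queued on the tx ring,
 * and the rx ring is then checked for a packet of matching length and
 * payload.
 */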
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;

	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_disable(&np->napi);
#endif
			netif_tx_lock_bh(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			spin_unlock_irq(&np->lock);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_enable(&np->napi);
#endif
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}
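/*
 * Usage note: the offline branch above corresponds to "ethtool -t eth0
 * offline" (interface name is an example); buffer[0..3] carry the link,
 * register, interrupt and loopback results in that order.
 */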
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}

static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
			return 1;
		udelay(50);
	}

	return 0;
}
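/*
 * Handshake summary for the function above (a reading of the code, not
 * documented hardware behaviour): the driver first waits up to ~5 seconds
 * for the management unit to release its semaphore, then sets the
 * host-acquire bit and reads the register back; ownership is only assumed
 * when the host bit sticks while the mgmt semaphore is still free,
 * retrying once before giving up.
 */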
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
		  NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
		  KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
	get_random_bytes(&i, sizeof(i));
	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
	       base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_enable(&np->napi);
#endif

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	drain_ring(dev);
	return ret;
}
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_disable(&np->napi);
#endif
	synchronize_irq(dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);
	drain_ring(dev);

	if (np->wolenabled) {
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	}

	/* FIXME: power down nic */

	return 0;
}
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	DECLARE_MAC_BUF(mac);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err) {
		printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
		       err, pci_name(pci_dev));
		goto out_free;
	}

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
			pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
			pci_resource_len(pci_dev, i),
			pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
		    pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
		       pci_name(pci_dev));
		goto out_relreg;
	}
	/* copy of driver data */
	np->driver_data = id->driver_data;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
				       pci_name(pci_dev));
			} else {
				dev->features |= NETIF_F_HIGHDMA;
				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
			}
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
				       pci_name(pci_dev));
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}
	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;
	dev->open = nv_open;
	dev->stop = nv_close;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		dev->hard_start_xmit = nv_start_xmit;
	else
		dev->hard_start_xmit = nv_start_xmit_optimized;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
#ifdef CONFIG_FORCEDETH_NAPI
	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);
	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
	    (id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
		/* set permanent address to be correct as well */
		np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
		np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
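	/*
	 * Worked example for the two branches above (illustrative values
	 * only): for MAC 00:11:22:33:44:55, a correct-order nic reads back
	 * NvRegMacAddrA = 0x33221100 and NvRegMacAddrB = 0x00005544, while
	 * a reversed-order nic reads back 0x22334455 and 0x00000011; both
	 * paths leave dev_addr[] holding 00 11 22 33 44 55.
	 */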
	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %s\n",
		       pci_name(pci_dev), print_mac(mac, dev->dev_addr));
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %s\n",
		pci_name(pci_dev), print_mac(mac, dev->dev_addr));

	/* set mac address */
	nv_copy_mac_to_hw(dev);
	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}
	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}
	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
			for (i = 0; i < 5000; i++) {
				msleep(1);
				if (nv_mgmt_acquire_sema(dev)) {
					/* management unit setup the phy already? */
					if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
					    NVREG_XMITCTL_SYNC_PHY_INIT) {
						/* phy is inited by mgmt unit */
						phyinitialized = 1;
						dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
					} else {
						/* we need to init the phy */
					}
					break;
				}
			}
		}
	}
	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_error;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
	       dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
	       pci_name(pci_dev));

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	nv_close(dev);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = 0;

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rc = nv_open(dev);
out:
	return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
static struct pci_device_id pci_tbl[] = {
	{ /* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{0,},
};
static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
	.suspend = nv_suspend,
	.resume = nv_resume,
};
static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
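/*
 * Worked example for the poll_interval formula above: the register value
 * is (time_in_micro_secs * 100) / 2^10, so a value of 98 corresponds to
 * roughly 1 ms (98 * 1024 / 100 is about 1003 microseconds).
 */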
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);