1 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
3 Written 1998-2000 by Donald Becker.
5 Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
6 send all bug reports to me, and not to Donald Becker, as this code
7 has been heavily modified from Donald's original version.
9 This software may be used and distributed according to the terms of
10 the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on or derived from this code fall under the GPL and must
12 retain the authorship, copyright and license notice. This file is not
13 a complete program and may only be used when the entire operating
14 system is licensed under the GPL.
16 The information below comes from Donald Becker's original driver:
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
23 Support and updates available at
24 http://www.scyld.com/network/starfire.html
26 -----------------------------------------------------------
28 Linux kernel-specific changes:
31 - Use PCI driver interface
36 - Merge Becker version 0.15
38 LK1.1.3 (Andrew Morton)
42 - Merge Becker version 1.03
44 LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
45 - Support hardware Rx/Tx checksumming
46 - Use the GFP firmware taken from Adaptec's Netware driver
48 LK1.2.2 (Ion Badulescu)
51 LK1.2.3 (Ion Badulescu)
52 - Fix the flaky mdio interface
53 - More compat clean-ups
55 LK1.2.4 (Ion Badulescu)
56 - More 2.2.x initialization fixes
58 LK1.2.5 (Ion Badulescu)
59 - Several fixes from Manfred Spraul
61 LK1.2.6 (Ion Badulescu)
62 - Fixed ifup/ifdown/ifup problem in 2.4.x
64 LK1.2.7 (Ion Badulescu)
66 - Made more functions static and __init
68 LK1.2.8 (Ion Badulescu)
69 - Quell bogus error messages, inform about the Tx threshold
70 - Removed #ifdef CONFIG_PCI, this driver is PCI only
72 LK1.2.9 (Ion Badulescu)
73 - Merged Jeff Garzik's changes from 2.4.4-pre5
74 - Added 2.2.x compatibility stuff required by the above changes
76 LK1.2.9a (Ion Badulescu)
77 - More updates from Jeff Garzik
79 LK1.3.0 (Ion Badulescu)
80 - Merged zerocopy support
82 LK1.3.1 (Ion Badulescu)
83 - Added ethtool support
84 - Added GPIO (media change) interrupt support
86 LK1.3.2 (Ion Badulescu)
87 - Fixed 2.2.x compatibility issues introduced in 1.3.1
88 - Fixed ethtool ioctl returning uninitialized memory
90 LK1.3.3 (Ion Badulescu)
91 - Initialize the TxMode register properly
92 - Don't dereference dev->priv after freeing it
94 LK1.3.4 (Ion Badulescu)
95 - Fixed initialization timing problems
96 - Fixed interrupt mask definitions
99 - ethtool NWAY_RST, GLINK, [GS]MSGLVL support
102 - Sparc64 support and fixes (Ion Badulescu)
103 - Better stats and error handling (Ion Badulescu)
104 - Use new pci_set_mwi() PCI API function (jgarzik)
106 LK1.3.7 (Ion Badulescu)
107 - minimal implementation of tx_timeout()
108 - correctly shutdown the Rx/Tx engines in netdev_close()
109 - added calls to netif_carrier_on/off
110 (patch from Stefan Rompf <srompf@isg.de>)
113 LK1.3.8 (Ion Badulescu)
114 - adjust DMA burst size on sparc64
116 - reworked zerocopy support for 64-bit buffers
117 - working and usable interrupt mitigation/latency
118 - reduced Tx interrupt frequency for lower interrupt overhead
120 LK1.3.9 (Ion Badulescu)
121 - bugfix for mcast filter
122 - enable the right kind of Tx interrupts (TxDMADone, not TxDone)
124 LK1.4.0 (Ion Badulescu)
127 LK1.4.1 (Ion Badulescu)
128 - flush PCI posting buffers after disabling Rx interrupts
129 - put the chip to a D3 slumber on driver unload
130 - added config option to enable/disable NAPI
132 LK1.4.2 (Ion Badulescu)
133 - finally added firmware (GPL'ed by Adaptec)
134 - removed compatibility code for 2.2.x
136 LK1.4.2.1 (Ion Badulescu)
137 - fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM
138 - added 32-bit padding to outgoing skb's, removed previous workaround
140 TODO: - fix forced speed/duplexing code (broken a long time ago, when
141 somebody converted the driver to use the generic MII code)
145 #define DRV_NAME "starfire"
146 #define DRV_VERSION "1.03+LK1.4.2.1"
147 #define DRV_RELDATE "October 3, 2005"
149 #include <linux/module.h>
150 #include <linux/kernel.h>
151 #include <linux/pci.h>
152 #include <linux/netdevice.h>
153 #include <linux/etherdevice.h>
154 #include <linux/init.h>
155 #include <linux/delay.h>
156 #include <linux/crc32.h>
157 #include <linux/ethtool.h>
158 #include <linux/mii.h>
159 #include <linux/if_vlan.h>
160 #include <asm/processor.h> /* Processor type for cache alignment. */
161 #include <asm/uaccess.h>
164 #include "starfire_firmware.h"
166 * The current frame processor firmware fails to checksum a fragment
167 * of length 1. If and when this is fixed, the #define below can be removed.
169 #define HAS_BROKEN_FIRMWARE
172 * If using the broken firmware, data must be padded to the next 32-bit boundary.
174 #ifdef HAS_BROKEN_FIRMWARE
175 #define PADDING_MASK 3
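/*
 * Purely illustrative sketch (the helper name is ours, not part of the
 * driver): with PADDING_MASK == 3 a length is rounded up to the next
 * 32-bit boundary, e.g. 61 -> 64, 64 -> 64. start_tx() below feeds the
 * same expression to skb_padto().
 */
static inline unsigned int starfire_pad_len(unsigned int len)
{
	return (len + PADDING_MASK) & ~PADDING_MASK;
}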
179 * Define this if using the driver with the zero-copy patch
183 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
187 #ifndef CONFIG_ADAPTEC_STARFIRE_NAPI
188 #undef HAVE_NETDEV_POLL
191 /* The user-configurable values.
192 These may be modified when a driver module is loaded.*/
194 /* Used for tuning interrupt latency vs. overhead. */
195 static int intr_latency;
196 static int small_frames;
198 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
199 static int max_interrupt_work = 20;
201 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
202 The Starfire has a 512 element hash table based on the Ethernet CRC. */
203 static const int multicast_filter_limit = 512;
204 /* Whether to do TCP/UDP checksums in hardware */
205 static int enable_hw_cksum = 1;
207 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
209 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
210 * Setting to > 1518 effectively disables this feature.
213 * The ia64 doesn't allow unaligned loads, even of integers misaligned
214 * on a 2-byte boundary. Thus we always force copying of packets, as the
215 * Starfire doesn't allow misaligned DMAs ;-(
218 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
219 * at least, having unaligned frames leads to a rather serious performance penalty.
222 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
223 static int rx_copybreak = PKT_BUF_SZ;
225 static int rx_copybreak /* = 0 */;
228 /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
230 #define DMA_BURST_SIZE 64
232 #define DMA_BURST_SIZE 128
235 /* Used to pass the media type, etc.
236 Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
237 The media type is usually passed in 'options[]'.
238 These variables are deprecated, use ethtool instead. -Ion
240 #define MAX_UNITS 8 /* More are supported, limit only on options */
241 static int options[MAX_UNITS] = {0, };
242 static int full_duplex[MAX_UNITS] = {0, };
244 /* Operational parameters that are set at compile time. */
246 /* The "native" ring sizes are either 256 or 2048.
247 However in some modes a descriptor may be marked to wrap the ring earlier.
249 #define RX_RING_SIZE 256
250 #define TX_RING_SIZE 32
251 /* The completion queues are fixed at 1024 entries, i.e. 4KB or 8KB. */
252 #define DONE_Q_SIZE 1024
253 /* All queues must be aligned on a 256-byte boundary */
254 #define QUEUE_ALIGN 256
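/*
 * Illustrative sketch only (this helper is ours and unused): the queue
 * allocation in netdev_open() below rounds each queue size up to the
 * next QUEUE_ALIGN boundary with the same arithmetic.
 */
static inline size_t starfire_queue_align(size_t size)
{
	return ((size + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
}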
256 #if RX_RING_SIZE > 256
257 #define RX_Q_ENTRIES Rx2048QEntries
259 #define RX_Q_ENTRIES Rx256QEntries
262 /* Operational parameters that usually are not changed. */
263 /* Time in jiffies before concluding the transmitter is hung. */
264 #define TX_TIMEOUT (2 * HZ)
268 * We need a much better method to determine if dma_addr_t is 64-bit.
270 #if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
271 /* 64-bit dma_addr_t */
272 #define ADDR_64BITS /* This chip uses 64 bit addresses. */
273 #define netdrv_addr_t u64
274 #define cpu_to_dma(x) cpu_to_le64(x)
275 #define dma_to_cpu(x) le64_to_cpu(x)
276 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
277 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
278 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
279 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
280 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
281 #else /* 32-bit dma_addr_t */
282 #define netdrv_addr_t u32
283 #define cpu_to_dma(x) cpu_to_le32(x)
284 #define dma_to_cpu(x) le32_to_cpu(x)
285 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
286 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
287 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
288 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
289 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
292 #define skb_first_frag_len(skb) skb_headlen(skb)
293 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
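/*
 * For illustration: descriptor addresses always go through cpu_to_dma(),
 * so the same code serves both address widths, e.g. (from init_ring()
 * below):
 *
 *	np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 */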
295 #ifdef HAVE_NETDEV_POLL
296 #define init_poll(dev) \
298 dev->poll = &netdev_poll; \
299 dev->weight = max_interrupt_work; \
301 #define netdev_rx(dev, ioaddr) \
304 if (netif_rx_schedule_prep(dev)) { \
305 __netif_rx_schedule(dev); \
306 intr_enable = readl(ioaddr + IntrEnable); \
307 intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
308 writel(intr_enable, ioaddr + IntrEnable); \
309 readl(ioaddr + IntrEnable); /* flush PCI posting buffers */ \
311 /* Paranoia check */ \
312 intr_enable = readl(ioaddr + IntrEnable); \
313 if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \
314 printk(KERN_INFO "%s: interrupt while in polling mode!\n", dev->name); \
315 intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
316 writel(intr_enable, ioaddr + IntrEnable); \
320 #define netdev_receive_skb(skb) netif_receive_skb(skb)
321 #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid)
322 static int netdev_poll(struct net_device *dev, int *budget);
323 #else /* not HAVE_NETDEV_POLL */
324 #define init_poll(dev)
325 #define netdev_receive_skb(skb) netif_rx(skb)
326 #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid)
327 #define netdev_rx(dev, ioaddr) \
329 int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \
330 __netdev_rx(dev, &quota);\
332 #endif /* not HAVE_NETDEV_POLL */
333 /* end of compatibility code */
336 /* These identify the driver base version and may not be removed. */
337 static const char version[] __devinitdata =
338 KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
339 KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
341 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
342 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
343 MODULE_LICENSE("GPL");
344 MODULE_VERSION(DRV_VERSION);
346 module_param(max_interrupt_work, int, 0);
347 module_param(mtu, int, 0);
348 module_param(debug, int, 0);
349 module_param(rx_copybreak, int, 0);
350 module_param(intr_latency, int, 0);
351 module_param(small_frames, int, 0);
352 module_param_array(options, int, NULL, 0);
353 module_param_array(full_duplex, int, NULL, 0);
354 module_param(enable_hw_cksum, int, 0);
355 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
356 MODULE_PARM_DESC(mtu, "MTU (all boards)");
357 MODULE_PARM_DESC(debug, "Debug level (0-6)");
358 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
359 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
360 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
361 MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
362 MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
363 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
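/*
 * Example module load (parameter values are illustrative only):
 *
 *	modprobe starfire intr_latency=500 small_frames=256 enable_hw_cksum=1
 */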
368 I. Board Compatibility
370 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
372 II. Board-specific settings
374 III. Driver operation
378 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
379 ring sizes are fixed by the hardware, but the rings may optionally be wrapped
380 earlier by the END bit in the descriptor.
381 This driver uses that hardware queue size for the Rx ring, where a large
382 number of entries has no ill effect beyond increasing the potential backlog.
383 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
384 disables the queue layer priority ordering and we have no mechanism to
385 utilize the hardware two-level priority queue. When modifying the
386 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
389 IIIb/c. Transmit/Receive Structure
391 See the Adaptec manual for the many possible structures, and options for
392 each structure. There are far too many to document all of them here.
394 For transmit this driver uses type 0/1 transmit descriptors (depending
395 on the 32/64 bitness of the architecture), and relies on automatic
396 minimum-length padding. It does not use the completion queue
397 consumer index, but instead checks for non-zero status entries.
399 For receive this driver uses type 2/3 receive descriptors. The driver
400 allocates full frame size skbuffs for the Rx ring buffers, so all frames
401 should fit in a single descriptor. The driver does not use the completion
402 queue consumer index, but instead checks for non-zero status entries.
404 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
405 is allocated and the frame is copied to the new skbuff. When the incoming
406 frame is larger, the skbuff is passed directly up the protocol stack.
407 Buffers consumed this way are replaced by newly allocated skbuffs in a later
410 A notable aspect of operation is that unaligned buffers are not permitted by
411 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
412 isn't longword aligned, which may cause problems on some machines,
413 e.g. Alphas and IA64. For these architectures, the driver is forced to copy
414 the frame into a new skbuff unconditionally. Copied frames are put into the
415 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
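A sketch of that copy path (mirroring __netdev_rx() below):

	skb = dev_alloc_skb(pkt_len + 2);
	skb_reserve(skb, 2);	(2 + 14-byte Ethernet header = 16)
	skb_put(skb, pkt_len);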
417 IIId. Synchronization
419 The driver runs as two independent, single-threaded flows of control. One
420 is the send-packet routine, which enforces single-threaded use by the
421 dev->tbusy flag. The other thread is the interrupt handler, which is single
422 threaded by the hardware and interrupt handling software.
424 The send packet thread has partial control over the Tx ring and the netif_queue
425 status. If the number of free Tx slots in the ring falls below a certain number
426 (currently hardcoded to 4), it signals the upper layer to stop the queue.
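Concretely, start_tx() below ends with:

	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
		netif_stop_queue(dev);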
428 The interrupt handler has exclusive control over the Rx ring and records stats
429 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
430 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
431 number of free Tx slots is above the threshold, it signals the upper layer to restart the queue.
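The matching wake-up check in the interrupt handler is:

	if (netif_queue_stopped(dev) &&
	    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE))
		netif_wake_queue(dev);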
438 The Adaptec Starfire manuals, available only from Adaptec.
439 http://www.scyld.com/expert/100mbps.html
440 http://www.scyld.com/expert/NWay.html
444 - StopOnPerr is broken, don't enable
445 - Hardware ethernet padding exposes random data, perform software padding
446 instead (unverified -- works correctly for all the hardware I have)
452 enum chip_capability_flags {CanHaveMII=1, };
458 static struct pci_device_id starfire_pci_tbl[] = {
459 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
462 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
464 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
465 static const struct chip_info {
468 } netdrv_tbl[] __devinitdata = {
469 { "Adaptec Starfire 6915", CanHaveMII },
473 /* Offsets to the device registers.
474 Unlike software-only systems, device drivers interact with complex hardware.
475 It's not useful to define symbolic names for every register bit in the
476 device. Such names would only partially document the semantics and would
477 make the driver longer and more difficult to read.
478 In general, only the important configuration values or bits changed
479 multiple times should be defined symbolically.
481 enum register_offsets {
482 PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
483 IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
484 MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
485 GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
486 TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
487 TxRingHiAddr=0x5009C, /* 64 bit address extension. */
488 TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
490 CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
491 RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
492 CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
493 RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
494 RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
495 TxMode=0x55000, VlanType=0x55064,
496 PerfFilterTable=0x56000, HashTable=0x56100,
497 TxGfpMem=0x58000, RxGfpMem=0x5a000,
501 * Bits in the interrupt status/mask registers.
502 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
503 * enables all the interrupt sources that are or'ed into those status bits.
505 enum intr_status_bits {
506 IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
507 IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
508 IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
509 IntrTxComplQLow=0x200000, IntrPCI=0x100000,
510 IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
511 IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
512 IntrNormalSummary=0x8000, IntrTxDone=0x4000,
513 IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
514 IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
515 IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
516 IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
517 IntrNoTxCsum=0x20, IntrTxBadID=0x10,
518 IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
519 IntrTxGfp=0x02, IntrPCIPad=0x01,
521 IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
522 IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
523 IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
526 /* Bits in the RxFilterMode register. */
528 AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
529 AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
530 PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
534 /* Bits in the TxMode register */
536 MiiSoftReset=0x8000, MIILoopback=0x4000,
537 TxFlowEnable=0x0800, RxFlowEnable=0x0400,
538 PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
541 /* Bits in the TxDescCtrl register. */
543 TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
544 TxDescSpace128=0x30, TxDescSpace256=0x40,
545 TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
546 TxDescType3=0x03, TxDescType4=0x04,
547 TxNoDMACompletion=0x08,
548 TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
549 TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
550 TxDMABurstSizeShift=8,
553 /* Bits in the RxDescQCtrl register. */
555 RxBufferLenShift=16, RxMinDescrThreshShift=0,
556 RxPrefetchMode=0x8000, RxVariableQ=0x2000,
557 Rx2048QEntries=0x4000, Rx256QEntries=0,
558 RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
559 RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
560 RxDescSpace4=0x000, RxDescSpace8=0x100,
561 RxDescSpace16=0x200, RxDescSpace32=0x300,
562 RxDescSpace64=0x400, RxDescSpace128=0x500,
566 /* Bits in the RxDMACtrl register. */
567 enum rx_dmactrl_bits {
568 RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
569 RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
570 RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
571 RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
572 RxChecksumRejectTCPOnly=0x01000000,
573 RxCompletionQ2Enable=0x800000,
574 RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
575 RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
576 RxDMAQ2NonIP=0x400000,
577 RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
578 RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
582 /* Bits in the RxCompletionAddr register */
584 RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
585 RxComplProducerWrEn=0x40,
586 RxComplType0=0x00, RxComplType1=0x10,
587 RxComplType2=0x20, RxComplType3=0x30,
588 RxComplThreshShift=0,
591 /* Bits in the TxCompletionAddr register */
593 TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
594 TxComplProducerWrEn=0x40,
595 TxComplIntrStatus=0x20,
596 CommonQueueMode=0x10,
597 TxComplThreshShift=0,
600 /* Bits in the GenCtrl register */
602 RxEnable=0x05, TxEnable=0x0a,
603 RxGFPEnable=0x10, TxGFPEnable=0x20,
606 /* Bits in the IntrTimerCtrl register */
607 enum intr_ctrl_bits {
608 Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
609 SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
610 IntrLatencyMask=0x1f,
613 /* The Rx and Tx buffer descriptors. */
614 struct starfire_rx_desc {
618 RxDescValid=1, RxDescEndRing=2,
621 /* Completion queue entry. */
622 struct short_rx_done_desc {
623 u32 status; /* Low 16 bits is length. */
625 struct basic_rx_done_desc {
626 u32 status; /* Low 16 bits is length. */
630 struct csum_rx_done_desc {
631 u32 status; /* Low 16 bits is length. */
632 u16 csum; /* Partial checksum */
635 struct full_rx_done_desc {
636 u32 status; /* Low 16 bits is length. */
640 u16 csum; /* partial checksum */
643 /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
645 typedef struct full_rx_done_desc rx_done_desc;
646 #define RxComplType RxComplType3
647 #else /* not VLAN_SUPPORT */
648 typedef struct csum_rx_done_desc rx_done_desc;
649 #define RxComplType RxComplType2
650 #endif /* not VLAN_SUPPORT */
653 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
656 /* Type 1 Tx descriptor. */
657 struct starfire_tx_desc_1 {
658 u32 status; /* Upper bits are status, lower 16 length. */
662 /* Type 2 Tx descriptor. */
663 struct starfire_tx_desc_2 {
664 u32 status; /* Upper bits are status, lower 16 length. */
670 typedef struct starfire_tx_desc_2 starfire_tx_desc;
671 #define TX_DESC_TYPE TxDescType2
672 #else /* not ADDR_64BITS */
673 typedef struct starfire_tx_desc_1 starfire_tx_desc;
674 #define TX_DESC_TYPE TxDescType1
675 #endif /* not ADDR_64BITS */
676 #define TX_DESC_SPACING TxDescSpaceUnlim
680 TxCRCEn=0x01000000, TxDescIntr=0x08000000,
681 TxRingWrap=0x04000000, TxCalTCP=0x02000000,
683 struct tx_done_desc {
684 u32 status; /* timestamp, index. */
686 u32 intrstatus; /* interrupt status */
690 struct rx_ring_info {
694 struct tx_ring_info {
697 unsigned int used_slots;
701 struct netdev_private {
702 /* Descriptor rings first for alignment. */
703 struct starfire_rx_desc *rx_ring;
704 starfire_tx_desc *tx_ring;
705 dma_addr_t rx_ring_dma;
706 dma_addr_t tx_ring_dma;
707 /* The addresses of rx/tx-in-place skbuffs. */
708 struct rx_ring_info rx_info[RX_RING_SIZE];
709 struct tx_ring_info tx_info[TX_RING_SIZE];
710 /* Pointers to completion queues (full pages). */
711 rx_done_desc *rx_done_q;
712 dma_addr_t rx_done_q_dma;
713 unsigned int rx_done;
714 struct tx_done_desc *tx_done_q;
715 dma_addr_t tx_done_q_dma;
716 unsigned int tx_done;
717 struct net_device_stats stats;
718 struct pci_dev *pci_dev;
720 struct vlan_group *vlgrp;
723 dma_addr_t queue_mem_dma;
724 size_t queue_mem_size;
726 /* Frequently used values: keep some adjacent for cache effect. */
728 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
729 unsigned int cur_tx, dirty_tx, reap_tx;
730 unsigned int rx_buf_sz; /* Based on MTU+slack. */
731 /* These values keep track of the transceiver/media in use. */
732 int speed100; /* Set if speed == 100MBit. */
736 /* MII transceiver section. */
737 struct mii_if_info mii_if; /* MII lib hooks/info */
738 int phy_cnt; /* MII device addresses. */
739 unsigned char phys[PHY_CNT]; /* MII device addresses. */
744 static int mdio_read(struct net_device *dev, int phy_id, int location);
745 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
746 static int netdev_open(struct net_device *dev);
747 static void check_duplex(struct net_device *dev);
748 static void tx_timeout(struct net_device *dev);
749 static void init_ring(struct net_device *dev);
750 static int start_tx(struct sk_buff *skb, struct net_device *dev);
751 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
752 static void netdev_error(struct net_device *dev, int intr_status);
753 static int __netdev_rx(struct net_device *dev, int *quota);
754 static void refill_rx_ring(struct net_device *dev);
756 static void set_rx_mode(struct net_device *dev);
757 static struct net_device_stats *get_stats(struct net_device *dev);
758 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
759 static int netdev_close(struct net_device *dev);
760 static void netdev_media_change(struct net_device *dev);
761 static struct ethtool_ops ethtool_ops;
765 static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
767 struct netdev_private *np = netdev_priv(dev);
769 spin_lock(&np->lock);
771 printk("%s: Setting vlgrp to %p\n", dev->name, grp);
774 spin_unlock(&np->lock);
777 static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
779 struct netdev_private *np = netdev_priv(dev);
781 spin_lock(&np->lock);
783 printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
785 spin_unlock(&np->lock);
788 static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
790 struct netdev_private *np = netdev_priv(dev);
792 spin_lock(&np->lock);
794 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
796 np->vlgrp->vlan_devices[vid] = NULL;
798 spin_unlock(&np->lock);
800 #endif /* VLAN_SUPPORT */
803 static int __devinit starfire_init_one(struct pci_dev *pdev,
804 const struct pci_device_id *ent)
806 struct netdev_private *np;
807 int i, irq, option, chip_idx = ent->driver_data;
808 struct net_device *dev;
809 static int card_idx = -1;
812 int drv_flags, io_size;
815 /* when built into the kernel, we only print version if device is found */
817 static int printed_version;
818 if (!printed_version++)
824 if (pci_enable_device (pdev))
827 ioaddr = pci_resource_start(pdev, 0);
828 io_size = pci_resource_len(pdev, 0);
829 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
830 printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
834 dev = alloc_etherdev(sizeof(*np));
836 printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
839 SET_MODULE_OWNER(dev);
840 SET_NETDEV_DEV(dev, &pdev->dev);
844 if (pci_request_regions (pdev, DRV_NAME)) {
845 printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
846 goto err_out_free_netdev;
849 /* ioremap is borken in Linux-2.2.x/sparc64 */
850 base = ioremap(ioaddr, io_size);
852 printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
853 card_idx, io_size, ioaddr);
854 goto err_out_free_res;
857 pci_set_master(pdev);
859 /* enable MWI -- it vastly improves Rx performance on sparc64 */
863 /* Starfire can do TCP/UDP checksumming */
865 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
866 #endif /* ZEROCOPY */
868 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
869 dev->vlan_rx_register = netdev_vlan_rx_register;
870 dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
871 dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
872 #endif /* VLAN_SUPPORT */
874 dev->features |= NETIF_F_HIGHDMA;
875 #endif /* ADDR_64BITS */
877 /* Serial EEPROM reads are hidden by the hardware. */
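	/* Note the reversed indexing below: the station address is stored
	   byte-reversed in the EEPROM, at offsets 20 down to 15. */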
878 for (i = 0; i < 6; i++)
879 dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
881 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
883 for (i = 0; i < 0x20; i++)
885 (unsigned int)readb(base + EEPROMCtrl + i),
886 i % 16 != 15 ? " " : "\n");
889 /* Issue soft reset */
890 writel(MiiSoftReset, base + TxMode);
892 writel(0, base + TxMode);
894 /* Reset the chip to erase previous misconfiguration. */
895 writel(1, base + PCIDeviceConfig);
897 while (--boguscnt > 0) {
899 if ((readl(base + PCIDeviceConfig) & 1) == 0)
903 printk("%s: chipset reset never completed!\n", dev->name);
904 /* wait a little longer */
907 dev->base_addr = (unsigned long)base;
910 np = netdev_priv(dev);
912 spin_lock_init(&np->lock);
913 pci_set_drvdata(pdev, dev);
917 np->mii_if.dev = dev;
918 np->mii_if.mdio_read = mdio_read;
919 np->mii_if.mdio_write = mdio_write;
920 np->mii_if.phy_id_mask = 0x1f;
921 np->mii_if.reg_num_mask = 0x1f;
923 drv_flags = netdrv_tbl[chip_idx].drv_flags;
925 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
927 option = dev->mem_start;
929 /* The lower four bits are the media type. */
931 np->mii_if.full_duplex = 1;
933 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
934 np->mii_if.full_duplex = 1;
936 if (np->mii_if.full_duplex)
937 np->mii_if.force_media = 1;
939 np->mii_if.force_media = 0;
942 /* timer resolution is 128 * 0.8us */
943 np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
944 Timer10X | EnableIntrMasking;
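	/* Worked example: one tick is 128 * 0.8us = ~102.4us, so a requested
	   intr_latency of 500us yields (500 * 10) / 1024 = 4 ticks. */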
946 if (small_frames > 0) {
947 np->intr_timer_ctrl |= SmallFrameBypass;
948 switch (small_frames) {
950 np->intr_timer_ctrl |= SmallFrame64;
953 np->intr_timer_ctrl |= SmallFrame128;
956 np->intr_timer_ctrl |= SmallFrame256;
959 np->intr_timer_ctrl |= SmallFrame512;
960 if (small_frames > 512)
961 printk("Adjusting small_frames down to 512\n");
966 /* The chip-specific entries in the device structure. */
967 dev->open = &netdev_open;
968 dev->hard_start_xmit = &start_tx;
969 dev->tx_timeout = tx_timeout;
970 dev->watchdog_timeo = TX_TIMEOUT;
972 dev->stop = &netdev_close;
973 dev->get_stats = &get_stats;
974 dev->set_multicast_list = &set_rx_mode;
975 dev->do_ioctl = &netdev_ioctl;
976 SET_ETHTOOL_OPS(dev, &ethtool_ops);
981 if (register_netdev(dev))
982 goto err_out_cleardev;
984 printk(KERN_INFO "%s: %s at %p, ",
985 dev->name, netdrv_tbl[chip_idx].name, base);
986 for (i = 0; i < 5; i++)
987 printk("%2.2x:", dev->dev_addr[i]);
988 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
990 if (drv_flags & CanHaveMII) {
991 int phy, phy_idx = 0;
993 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
994 mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
997 while (--boguscnt > 0)
998 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
1000 if (boguscnt == 0) {
1001 printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
1004 mii_status = mdio_read(dev, phy, MII_BMSR);
1005 if (mii_status != 0) {
1006 np->phys[phy_idx++] = phy;
1007 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
1008 printk(KERN_INFO "%s: MII PHY found at address %d, status "
1009 "%#4.4x advertising %#4.4x.\n",
1010 dev->name, phy, mii_status, np->mii_if.advertising);
1011 /* there can be only one PHY on-board */
1015 np->phy_cnt = phy_idx;
1016 if (np->phy_cnt > 0)
1017 np->mii_if.phy_id = np->phys[0];
1019 memset(&np->mii_if, 0, sizeof(np->mii_if));
1022 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
1023 dev->name, enable_hw_cksum ? "enabled" : "disabled");
1027 pci_set_drvdata(pdev, NULL);
1030 pci_release_regions (pdev);
1031 err_out_free_netdev:
1037 /* Read the MII Management Data I/O (MDIO) interfaces. */
1038 static int mdio_read(struct net_device *dev, int phy_id, int location)
1040 struct netdev_private *np = netdev_priv(dev);
1041 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
1042 int result, boguscnt=1000;
1043 /* ??? Should we add a busy-wait here? */
1044 do
1045 result = readl(mdio_addr);
1046 while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
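	/* The loop above spins until the top two status bits read 10
	   (apparently "cycle done, data valid") or the counter expires. */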
1049 if ((result & 0xffff) == 0xffff)
1051 return result & 0xffff;
1055 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
1057 struct netdev_private *np = netdev_priv(dev);
1058 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
1059 writel(value, mdio_addr);
1060 /* The busy-wait will occur before a read. */
1064 static int netdev_open(struct net_device *dev)
1066 struct netdev_private *np = netdev_priv(dev);
1067 void __iomem *ioaddr = np->base;
1069 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
1071 /* Do we ever need to reset the chip??? */
1073 retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
1077 /* Disable the Rx and Tx, and reset the chip. */
1078 writel(0, ioaddr + GenCtrl);
1079 writel(1, ioaddr + PCIDeviceConfig);
1081 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
1082 dev->name, dev->irq);
1084 /* Allocate the various queues. */
1085 if (np->queue_mem == 0) {
1086 tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1087 rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1088 tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1089 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
1090 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
1091 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
1092 if (np->queue_mem == NULL) {
1093 free_irq(dev->irq, dev);
1097 np->tx_done_q = np->queue_mem;
1098 np->tx_done_q_dma = np->queue_mem_dma;
1099 np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
1100 np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
1101 np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
1102 np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
1103 np->rx_ring = (void *) np->tx_ring + tx_ring_size;
1104 np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
1107 /* Start with no carrier, it gets adjusted later */
1108 netif_carrier_off(dev);
1110 /* Set the size of the Rx buffers. */
1111 writel((np->rx_buf_sz << RxBufferLenShift) |
1112 (0 << RxMinDescrThreshShift) |
1113 RxPrefetchMode | RxVariableQ |
1115 RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
1117 ioaddr + RxDescQCtrl);
1119 /* Set up the Rx DMA controller. */
1120 writel(RxChecksumIgnore |
1121 (0 << RxEarlyIntThreshShift) |
1122 (6 << RxHighPrioThreshShift) |
1123 ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
1124 ioaddr + RxDMACtrl);
1126 /* Set Tx descriptor */
1127 writel((2 << TxHiPriFIFOThreshShift) |
1128 (0 << TxPadLenShift) |
1129 ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
1130 TX_DESC_Q_ADDR_SIZE |
1131 TX_DESC_SPACING | TX_DESC_TYPE,
1132 ioaddr + TxDescCtrl);
1134 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
1135 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
1136 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
1137 writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
1138 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
1140 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
1141 writel(np->rx_done_q_dma |
1143 (0 << RxComplThreshShift),
1144 ioaddr + RxCompletionAddr);
1147 printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
1149 /* Fill both the Tx SA register and the Rx perfect filter. */
1150 for (i = 0; i < 6; i++)
1151 writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
1152 /* The first entry is special because it bypasses the VLAN filter.
1154 writew(0, ioaddr + PerfFilterTable);
1155 writew(0, ioaddr + PerfFilterTable + 4);
1156 writew(0, ioaddr + PerfFilterTable + 8);
1157 for (i = 1; i < 16; i++) {
1158 u16 *eaddrs = (u16 *)dev->dev_addr;
1159 void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
1160 writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
1161 writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
1162 writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
1165 /* Initialize other registers. */
1166 /* Configure the PCI bus bursts and FIFO thresholds. */
1167 np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
1168 writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
1170 writel(np->tx_mode, ioaddr + TxMode);
1171 np->tx_threshold = 4;
1172 writel(np->tx_threshold, ioaddr + TxThreshold);
1174 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1176 netif_start_queue(dev);
1179 printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
1182 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1185 /* Enable GPIO interrupts on link change */
1186 writel(0x0f00ff00, ioaddr + GPIOCtrl);
1188 /* Set the interrupt mask */
1189 writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
1190 IntrTxDMADone | IntrStatsMax | IntrLinkChange |
1191 IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
1192 ioaddr + IntrEnable);
1193 /* Enable PCI interrupts. */
1194 writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1195 ioaddr + PCIDeviceConfig);
1198 /* Set VLAN type to 802.1q */
1199 writel(ETH_P_8021Q, ioaddr + VlanType);
1200 #endif /* VLAN_SUPPORT */
1202 /* Load Rx/Tx firmware into the frame processors */
1203 for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
1204 writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
1205 for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
1206 writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
1207 if (enable_hw_cksum)
1208 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1209 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1211 /* Enable the Rx and Tx units only. */
1212 writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1215 printk(KERN_DEBUG "%s: Done netdev_open().\n",
1222 static void check_duplex(struct net_device *dev)
1224 struct netdev_private *np = netdev_priv(dev);
1226 int silly_count = 1000;
1228 mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1229 mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1231 while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1234 printk("%s: MII reset failed!\n", dev->name);
1238 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1240 if (!np->mii_if.force_media) {
1241 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1243 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1245 reg0 |= BMCR_SPEED100;
1246 if (np->mii_if.full_duplex)
1247 reg0 |= BMCR_FULLDPLX;
1248 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1250 np->speed100 ? "100" : "10",
1251 np->mii_if.full_duplex ? "full" : "half");
1253 mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1257 static void tx_timeout(struct net_device *dev)
1259 struct netdev_private *np = netdev_priv(dev);
1260 void __iomem *ioaddr = np->base;
1263 printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1264 "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1266 /* Perhaps we should reinitialize the hardware here. */
1269 * Stop and restart the interface.
1270 * Cheat and increase the debug level temporarily.
1278 /* Trigger an immediate transmit demand. */
1280 dev->trans_start = jiffies;
1281 np->stats.tx_errors++;
1282 netif_wake_queue(dev);
1286 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1287 static void init_ring(struct net_device *dev)
1289 struct netdev_private *np = netdev_priv(dev);
1292 np->cur_rx = np->cur_tx = np->reap_tx = 0;
1293 np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1295 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1297 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1298 for (i = 0; i < RX_RING_SIZE; i++) {
1299 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1300 np->rx_info[i].skb = skb;
1303 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1304 skb->dev = dev; /* Mark as being used by this device. */
1305 /* Grrr, we cannot offset to correctly align the IP header. */
1306 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1308 writew(i - 1, np->base + RxDescQIdx);
1309 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1311 /* Clear the remainder of the Rx buffer ring. */
1312 for ( ; i < RX_RING_SIZE; i++) {
1313 np->rx_ring[i].rxaddr = 0;
1314 np->rx_info[i].skb = NULL;
1315 np->rx_info[i].mapping = 0;
1317 /* Mark the last entry as wrapping the ring. */
1318 np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1320 /* Clear the completion rings. */
1321 for (i = 0; i < DONE_Q_SIZE; i++) {
1322 np->rx_done_q[i].status = 0;
1323 np->tx_done_q[i].status = 0;
1326 for (i = 0; i < TX_RING_SIZE; i++)
1327 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1333 static int start_tx(struct sk_buff *skb, struct net_device *dev)
1335 struct netdev_private *np = netdev_priv(dev);
1341 * be cautious here, wrapping the queue has weird semantics
1342 * and we may not have enough slots even when it seems we do.
1344 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1345 netif_stop_queue(dev);
1349 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1350 if (skb->ip_summed == CHECKSUM_HW) {
1351 if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1352 return NETDEV_TX_OK;
1354 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1356 entry = np->cur_tx % TX_RING_SIZE;
1357 for (i = 0; i < skb_num_frags(skb); i++) {
1362 np->tx_info[entry].skb = skb;
1364 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1365 status |= TxRingWrap;
1369 status |= TxDescIntr;
1372 if (skb->ip_summed == CHECKSUM_HW) {
1374 np->stats.tx_compressed++;
1376 status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1378 np->tx_info[entry].mapping =
1379 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1381 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1382 status |= this_frag->size;
1383 np->tx_info[entry].mapping =
1384 pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
1387 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1388 np->tx_ring[entry].status = cpu_to_le32(status);
1390 printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1391 dev->name, np->cur_tx, np->dirty_tx,
1394 np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1395 np->cur_tx += np->tx_info[entry].used_slots;
1398 np->tx_info[entry].used_slots = 1;
1399 np->cur_tx += np->tx_info[entry].used_slots;
1402 /* scavenge the tx descriptors twice per TX_RING_SIZE */
1403 if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1407 /* Non-x86: explicitly flush descriptor cache lines here. */
1408 /* Ensure all descriptors are written back before the transmit is initiated. */
1412 /* Update the producer index. */
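	/* The hardware producer index appears to count 8-byte descriptor
	   words rather than entries, hence the sizeof()/8 scaling below. */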
1413 writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
1415 /* 4 is arbitrary, but should be ok */
1416 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1417 netif_stop_queue(dev);
1419 dev->trans_start = jiffies;
1425 /* The interrupt handler does all of the Rx thread work and cleans up
1426 after the Tx thread. */
1427 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1429 struct net_device *dev = dev_instance;
1430 struct netdev_private *np = netdev_priv(dev);
1431 void __iomem *ioaddr = np->base;
1432 int boguscnt = max_interrupt_work;
1438 u32 intr_status = readl(ioaddr + IntrClear);
1441 printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1442 dev->name, intr_status);
1444 if (intr_status == 0 || intr_status == (u32) -1)
1449 if (intr_status & (IntrRxDone | IntrRxEmpty))
1450 netdev_rx(dev, ioaddr);
1452 /* Scavenge the skbuff list based on the Tx-done queue.
1453 There are redundant checks here that may be cleaned up
1454 after the driver has proven to be reliable. */
1455 consumer = readl(ioaddr + TxConsumerIdx);
1457 printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1458 dev->name, consumer);
1460 while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1462 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1463 dev->name, np->dirty_tx, np->tx_done, tx_status);
1464 if ((tx_status & 0xe0000000) == 0xa0000000) {
1465 np->stats.tx_packets++;
1466 } else if ((tx_status & 0xe0000000) == 0x80000000) {
1467 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1468 struct sk_buff *skb = np->tx_info[entry].skb;
1469 np->tx_info[entry].skb = NULL;
1470 pci_unmap_single(np->pci_dev,
1471 np->tx_info[entry].mapping,
1472 skb_first_frag_len(skb),
1474 np->tx_info[entry].mapping = 0;
1475 np->dirty_tx += np->tx_info[entry].used_slots;
1476 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1479 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1480 pci_unmap_single(np->pci_dev,
1481 np->tx_info[entry].mapping,
1482 skb_shinfo(skb)->frags[i].size,
1489 dev_kfree_skb_irq(skb);
1491 np->tx_done_q[np->tx_done].status = 0;
1492 np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1494 writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1496 if (netif_queue_stopped(dev) &&
1497 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1498 /* The ring is no longer full, wake the queue. */
1499 netif_wake_queue(dev);
1502 /* Stats overflow */
1503 if (intr_status & IntrStatsMax)
1506 /* Media change interrupt. */
1507 if (intr_status & IntrLinkChange)
1508 netdev_media_change(dev);
1510 /* Abnormal error summary/uncommon events handlers. */
1511 if (intr_status & IntrAbnormalSummary)
1512 netdev_error(dev, intr_status);
1514 if (--boguscnt < 0) {
1516 printk(KERN_WARNING "%s: Too much work at interrupt, "
1518 dev->name, intr_status);
1524 printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1525 dev->name, (int) readl(ioaddr + IntrStatus));
1526 return IRQ_RETVAL(handled);
1530 /* This routine is logically part of the interrupt/poll handler, but separated
1531 for clarity, code sharing between NAPI/non-NAPI, and better register allocation. */
1532 static int __netdev_rx(struct net_device *dev, int *quota)
1534 struct netdev_private *np = netdev_priv(dev);
1538 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1539 while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1540 struct sk_buff *skb;
1543 rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1546 printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1547 if (!(desc_status & RxOK)) {
1548 /* There was an error. */
1550 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1551 np->stats.rx_errors++;
1552 if (desc_status & RxFIFOErr)
1553 np->stats.rx_fifo_errors++;
1557 if (*quota <= 0) { /* out of rx quota */
1563 pkt_len = desc_status; /* Implicitly Truncate */
1564 entry = (desc_status >> 16) & 0x7ff;
1567 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1568 /* Check if the packet is long enough to accept without copying
1569 to a minimally-sized skbuff. */
1570 if (pkt_len < rx_copybreak
1571 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1573 skb_reserve(skb, 2); /* 16 byte align the IP header */
1574 pci_dma_sync_single_for_cpu(np->pci_dev,
1575 np->rx_info[entry].mapping,
1576 pkt_len, PCI_DMA_FROMDEVICE);
1577 eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
1578 pci_dma_sync_single_for_device(np->pci_dev,
1579 np->rx_info[entry].mapping,
1580 pkt_len, PCI_DMA_FROMDEVICE);
1581 skb_put(skb, pkt_len);
1583 pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1584 skb = np->rx_info[entry].skb;
1585 skb_put(skb, pkt_len);
1586 np->rx_info[entry].skb = NULL;
1587 np->rx_info[entry].mapping = 0;
1589 #ifndef final_version /* Remove after testing. */
1590 /* You will want this info for the initial debug. */
1592 printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1593 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n",
1594 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1595 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1596 skb->data[8], skb->data[9], skb->data[10],
1597 skb->data[11], skb->data[12], skb->data[13]);
1600 skb->protocol = eth_type_trans(skb, dev);
1603 printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1605 if (le16_to_cpu(desc->status2) & 0x0100) {
1606 skb->ip_summed = CHECKSUM_UNNECESSARY;
1607 np->stats.rx_compressed++;
1610 * This feature doesn't seem to be working, at least
1611 * with the two firmware versions I have. If the GFP sees
1612 * an IP fragment, it either ignores it completely, or reports
1613 * "bad checksum" on it.
1615 * Maybe I missed something -- corrections are welcome.
1616 * Until then, the printk stays. :-) -Ion
1618 else if (le16_to_cpu(desc->status2) & 0x0040) {
1619 skb->ip_summed = CHECKSUM_HW;
1620 skb->csum = le16_to_cpu(desc->csum);
1621 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1624 if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
1626 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid));
1627 /* vlan_netdev_receive_skb() expects a packet with the VLAN tag stripped out */
1628 vlan_netdev_receive_skb(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK);
1630 #endif /* VLAN_SUPPORT */
1631 netdev_receive_skb(skb);
1632 dev->last_rx = jiffies;
1633 np->stats.rx_packets++;
1638 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1640 writew(np->rx_done, np->base + CompletionQConsumerIdx);
1643 refill_rx_ring(dev);
1645 printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1646 retcode, np->rx_done, desc_status);
1651 #ifdef HAVE_NETDEV_POLL
1652 static int netdev_poll(struct net_device *dev, int *budget)
1655 struct netdev_private *np = netdev_priv(dev);
1656 void __iomem *ioaddr = np->base;
1657 int retcode = 0, quota = dev->quota;
1660 writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1662 retcode = __netdev_rx(dev, &quota);
1663 *budget -= (dev->quota - quota);
1668 intr_status = readl(ioaddr + IntrStatus);
1669 } while (intr_status & (IntrRxDone | IntrRxEmpty));
1671 netif_rx_complete(dev);
1672 intr_status = readl(ioaddr + IntrEnable);
1673 intr_status |= IntrRxDone | IntrRxEmpty;
1674 writel(intr_status, ioaddr + IntrEnable);
1678 printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", retcode);
1680 /* Restart Rx engine if stopped. */
1683 #endif /* HAVE_NETDEV_POLL */
1686 static void refill_rx_ring(struct net_device *dev)
1688 struct netdev_private *np = netdev_priv(dev);
1689 struct sk_buff *skb;
1692 /* Refill the Rx ring buffers. */
1693 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1694 entry = np->dirty_rx % RX_RING_SIZE;
1695 if (np->rx_info[entry].skb == NULL) {
1696 skb = dev_alloc_skb(np->rx_buf_sz);
1697 np->rx_info[entry].skb = skb;
1699 break; /* Better luck next round. */
1700 np->rx_info[entry].mapping =
1701 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1702 skb->dev = dev; /* Mark as being used by this device. */
1703 np->rx_ring[entry].rxaddr =
1704 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1706 if (entry == RX_RING_SIZE - 1)
1707 np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1710 writew(entry, np->base + RxDescQIdx);
1714 static void netdev_media_change(struct net_device *dev)
1716 struct netdev_private *np = netdev_priv(dev);
1717 void __iomem *ioaddr = np->base;
1718 u16 reg0, reg1, reg4, reg5;
1720 u32 new_intr_timer_ctrl;
1722 /* reset status first */
1723 mdio_read(dev, np->phys[0], MII_BMCR);
1724 mdio_read(dev, np->phys[0], MII_BMSR);
1726 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1727 reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1729 if (reg1 & BMSR_LSTATUS) {
1731 if (reg0 & BMCR_ANENABLE) {
1732 /* autonegotiation is enabled */
1733 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1734 reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1735 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1737 np->mii_if.full_duplex = 1;
1738 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1740 np->mii_if.full_duplex = 0;
1741 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1743 np->mii_if.full_duplex = 1;
1746 np->mii_if.full_duplex = 0;
1749 /* autonegotiation is disabled */
1750 if (reg0 & BMCR_SPEED100)
1754 if (reg0 & BMCR_FULLDPLX)
1755 np->mii_if.full_duplex = 1;
1757 np->mii_if.full_duplex = 0;
1759 netif_carrier_on(dev);
1760 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1762 np->speed100 ? "100" : "10",
1763 np->mii_if.full_duplex ? "full" : "half");
1765 new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
1766 if (np->mii_if.full_duplex)
1767 new_tx_mode |= FullDuplex;
1768 if (np->tx_mode != new_tx_mode) {
1769 np->tx_mode = new_tx_mode;
1770 writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1772 writel(np->tx_mode, ioaddr + TxMode);
1775 new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1777 new_intr_timer_ctrl |= Timer10X;
1778 if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1779 np->intr_timer_ctrl = new_intr_timer_ctrl;
1780 writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1783 netif_carrier_off(dev);
1784 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1789 static void netdev_error(struct net_device *dev, int intr_status)
1791 struct netdev_private *np = netdev_priv(dev);
1793 /* Came close to underrunning the Tx FIFO, increase threshold. */
1794 if (intr_status & IntrTxDataLow) {
1795 if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1796 writel(++np->tx_threshold, np->base + TxThreshold);
1797 printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1798 dev->name, np->tx_threshold * 16);
1800 printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1802 if (intr_status & IntrRxGFPDead) {
1803 np->stats.rx_fifo_errors++;
1804 np->stats.rx_errors++;
1806 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1807 np->stats.tx_fifo_errors++;
1808 np->stats.tx_errors++;
1810 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1811 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1812 dev->name, intr_status);
1816 static struct net_device_stats *get_stats(struct net_device *dev)
1818 struct netdev_private *np = netdev_priv(dev);
1819 void __iomem *ioaddr = np->base;
1821 /* This adapter architecture needs no SMP locks. */
1822 np->stats.tx_bytes = readl(ioaddr + 0x57010);
1823 np->stats.rx_bytes = readl(ioaddr + 0x57044);
1824 np->stats.tx_packets = readl(ioaddr + 0x57000);
1825 np->stats.tx_aborted_errors =
1826 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1827 np->stats.tx_window_errors = readl(ioaddr + 0x57018);
1828 np->stats.collisions =
1829 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1831 /* The chip need only report frames it silently dropped. */
1832 np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1833 writew(0, ioaddr + RxDMAStatus);
1834 np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1835 np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1836 np->stats.rx_length_errors = readl(ioaddr + 0x57058);
1837 np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u32 rx_mode = MinVLANPrio;
	struct dev_mc_list *mclist;
	int i;

#ifdef VLAN_SUPPORT
	rx_mode |= VlanMode;
	if (np->vlgrp) {
		int vlan_count = 0;
		void __iomem *filter_addr = ioaddr + HashTable + 8;
		for (i = 0; i < VLAN_VID_MASK; i++) {
			if (np->vlgrp->vlan_devices[i]) {
				if (vlan_count >= 32)
					break;
				writew(cpu_to_be16(i), filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
		if (i == VLAN_VID_MASK) {
			rx_mode |= PerfectFilterVlan;
			while (vlan_count < 32) {
				writew(0, filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
	}
#endif /* VLAN_SUPPORT */

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		rx_mode |= AcceptAll;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
	} else if (dev->mc_count <= 14) {
		/* Use the 16 element perfect filter, skip first two entries. */
		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		u16 *eaddrs;
		for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
		     i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
		}
		eaddrs = (u16 *)dev->dev_addr;
		while (i++ < 16) {
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		rx_mode |= AcceptBroadcast|PerfectFilter;
	} else {
		/* Must use a multicast hash table. */
		void __iomem *filter_addr;
		u16 *eaddrs;
		u16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* The chip uses the upper 9 CRC bits
			   as index into the hash table */
			int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
			__u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];

			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
		}
		/* Clear the perfect filter list, skip first two entries. */
		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		eaddrs = (u16 *)dev->dev_addr;
		for (i = 2; i < 16; i++) {
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
			writew(mc_filter[i], filter_addr);
		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	}
	writel(rx_mode, ioaddr + RxFilterMode);
}

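/*
 * Illustrative sketch (not compiled in): where a given multicast address
 * lands in the hash filter built above.  The helper below is hypothetical
 * and exists only to make the bit arithmetic explicit; the upper 9 CRC
 * bits select one bit out of 32 16-bit filter registers.
 */
#if 0
static void example_hash_position(const u8 *addr)
{
	int bit_nr = ether_crc_le(ETH_ALEN, addr) >> 23;	/* 0..511 */
	int reg = bit_nr >> 4;		/* which 16-bit filter register, 0..31 */
	int bit = bit_nr & 15;		/* which bit within that register */

	/* e.g. bit_nr == 291 selects bit 3 of register 18, which is the
	   same position the (bit_nr >> 4) & ~1 / (bit_nr & 31) arithmetic
	   reaches through the little-endian 32-bit view of mc_filter[]. */
	printk(KERN_DEBUG "hash hit: register %d, bit %d\n", reg, bit);
}
#endif
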
static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	check_duplex(dev);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	return debug;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	debug = val;
}

static struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
};

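/*
 * The ethtool core invokes .begin before dispatching any of the handlers
 * above, so check_if_running() gates every operation: none of them can
 * touch the MII interface while the device is down.
 */
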
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
	spin_unlock_irq(&np->lock);
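
	/* A SIOCSMIIREG write may have changed the advertised modes on the
	   primary PHY; re-run duplex detection so the MAC's duplex setting
	   stays consistent with whatever the link renegotiates. */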
	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
		check_duplex(dev);

	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	netif_stop_queue(dev);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + GenCtrl);
	readl(ioaddr + GenCtrl);
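
	/* The dummy readl() above flushes the PCI posting buffers, making
	   sure the stop command written to GenCtrl has actually reached
	   the chip before we begin tearing down the rings. */
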
	if (debug > 5) {
		printk(KERN_DEBUG "  Tx ring at %#llx:\n",
		       (long long) np->tx_ring_dma);
		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
			printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
			       i, le32_to_cpu(np->tx_ring[i].status),
			       (long long) dma_to_cpu(np->tx_ring[i].addr),
			       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
				printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
			}
	}

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
		if (np->rx_info[i].skb != NULL) {
			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_info[i].skb);
		}
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		pci_unmap_single(np->pci_dev,
				 np->tx_info[i].mapping,
				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
	}

	return 0;
}

#ifdef CONFIG_PM
static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_device_detach(dev);
		netdev_close(dev);
	}
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int starfire_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (netif_running(dev)) {
		netdev_open(dev);
		netif_device_attach(dev);
	}
	return 0;
}
#endif /* CONFIG_PM */

static void __devexit starfire_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);

	BUG_ON(!dev);

	unregister_netdev(dev);

	if (np->queue_mem)
		pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);

	/* XXX: add wakeup code -- requires firmware for MagicPacket */
	pci_set_power_state(pdev, PCI_D3hot);	/* go to sleep in D3 mode */
	pci_disable_device(pdev);

	iounmap(np->base);
	pci_release_regions(pdev);

	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);			/* Will also free np!! */
}

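/*
 * Teardown order matters here: unregister_netdev() goes first so the
 * stack stops submitting work, the DMA queue memory is released next,
 * the chip is put to sleep in D3 before the PCI device is disabled, and
 * free_netdev() must come last because np is embedded in dev.
 */
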
static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
	.probe		= starfire_init_one,
	.remove		= __devexit_p(starfire_remove_one),
#ifdef CONFIG_PM
	.suspend	= starfire_suspend,
	.resume		= starfire_resume,
#endif /* CONFIG_PM */
	.id_table	= starfire_pci_tbl,
};

static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#ifdef HAVE_NETDEV_POLL
	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#else
	printk(KERN_INFO DRV_NAME ": polling (NAPI) disabled\n");
#endif
#endif

	/* we can do this test only at run-time... sigh */
	if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
		printk(KERN_ERR "This driver has dma_addr_t issues, please send email to maintainer\n");
		return -ENODEV;
	}

	return pci_module_init (&starfire_driver);
}

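/*
 * The sizeof() check above guards against configurations where the
 * kernel's dma_addr_t is wider than the address type this driver was
 * compiled to store in its descriptors (netdrv_addr_t); on a mismatch,
 * DMA addresses would be silently truncated, so we refuse to load.
 */
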
static void __exit starfire_cleanup (void)
{
	pci_unregister_driver (&starfire_driver);
}


module_init(starfire_init);
module_exit(starfire_cleanup);