1 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
3 Written 1998-2000 by Donald Becker.
5 Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
6 send all bug reports to me, and not to Donald Becker, as this code
7 has been heavily modified from Donald's original version.
9 This software may be used and distributed according to the terms of
10 the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on or derived from this code fall under the GPL and must
12 retain the authorship, copyright and license notice. This file is not
13 a complete program and may only be used when the entire operating
14 system is licensed under the GPL.
16 The information below comes from Donald Becker's original driver:
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
23 Support and updates available at
24 http://www.scyld.com/network/starfire.html
26 -----------------------------------------------------------
28 Linux kernel-specific changes:
31 - Use PCI driver interface
36 - Merge Becker version 0.15
38 LK1.1.3 (Andrew Morton)
42 - Merge Becker version 1.03
44 LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
45 - Support hardware Rx/Tx checksumming
46 - Use the GFP firmware taken from Adaptec's Netware driver
48 LK1.2.2 (Ion Badulescu)
51 LK1.2.3 (Ion Badulescu)
52 - Fix the flaky mdio interface
53 - More compat clean-ups
55 LK1.2.4 (Ion Badulescu)
56 - More 2.2.x initialization fixes
58 LK1.2.5 (Ion Badulescu)
59 - Several fixes from Manfred Spraul
61 LK1.2.6 (Ion Badulescu)
62 - Fixed ifup/ifdown/ifup problem in 2.4.x
64 LK1.2.7 (Ion Badulescu)
66 - Made more functions static and __init
68 LK1.2.8 (Ion Badulescu)
69 - Quell bogus error messages, inform about the Tx threshold
70 - Removed #ifdef CONFIG_PCI, this driver is PCI only
72 LK1.2.9 (Ion Badulescu)
73 - Merged Jeff Garzik's changes from 2.4.4-pre5
74 - Added 2.2.x compatibility stuff required by the above changes
76 LK1.2.9a (Ion Badulescu)
77 - More updates from Jeff Garzik
79 LK1.3.0 (Ion Badulescu)
80 - Merged zerocopy support
82 LK1.3.1 (Ion Badulescu)
83 - Added ethtool support
84 - Added GPIO (media change) interrupt support
86 LK1.3.2 (Ion Badulescu)
87 - Fixed 2.2.x compatibility issues introduced in 1.3.1
88 - Fixed ethtool ioctl returning uninitialized memory
90 LK1.3.3 (Ion Badulescu)
91 - Initialize the TxMode register properly
92 - Don't dereference dev->priv after freeing it
94 LK1.3.4 (Ion Badulescu)
95 - Fixed initialization timing problems
96 - Fixed interrupt mask definitions
99 - ethtool NWAY_RST, GLINK, [GS]MSGLVL support
102 - Sparc64 support and fixes (Ion Badulescu)
103 - Better stats and error handling (Ion Badulescu)
104 - Use new pci_set_mwi() PCI API function (jgarzik)
106 LK1.3.7 (Ion Badulescu)
107 - minimal implementation of tx_timeout()
108 - correctly shutdown the Rx/Tx engines in netdev_close()
109 - added calls to netif_carrier_on/off
110 (patch from Stefan Rompf <srompf@isg.de>)
113 LK1.3.8 (Ion Badulescu)
114 - adjust DMA burst size on sparc64
116 - reworked zerocopy support for 64-bit buffers
117 - working and usable interrupt mitigation/latency
118 - reduced Tx interrupt frequency for lower interrupt overhead
120 LK1.3.9 (Ion Badulescu)
121 - bugfix for mcast filter
122 - enable the right kind of Tx interrupts (TxDMADone, not TxDone)
124 LK1.4.0 (Ion Badulescu)
127 LK1.4.1 (Ion Badulescu)
128 - flush PCI posting buffers after disabling Rx interrupts
129 - put the chip to a D3 slumber on driver unload
130 - added config option to enable/disable NAPI
132 LK1.4.2 (Ion Badulescu)
133 - finally added firmware (GPL'ed by Adaptec)
134 - removed compatibility code for 2.2.x
136 TODO: - fix forced speed/duplexing code (broken a long time ago, when
137 somebody converted the driver to use the generic MII code)
141 #define DRV_NAME "starfire"
142 #define DRV_VERSION "1.03+LK1.4.2"
143 #define DRV_RELDATE "January 19, 2005"
145 #include <linux/config.h>
146 #include <linux/version.h>
147 #include <linux/module.h>
148 #include <linux/kernel.h>
149 #include <linux/pci.h>
150 #include <linux/netdevice.h>
151 #include <linux/etherdevice.h>
152 #include <linux/init.h>
153 #include <linux/delay.h>
154 #include <linux/crc32.h>
155 #include <linux/ethtool.h>
156 #include <linux/mii.h>
157 #include <linux/if_vlan.h>
158 #include <asm/processor.h> /* Processor type for cache alignment. */
159 #include <asm/uaccess.h>
162 #include "starfire_firmware.h"
/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE
/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

#ifndef CONFIG_ADAPTEC_STARFIRE_NAPI
#undef HAVE_NETDEV_POLL
#endif
181 /* The user-configurable values.
182 These may be modified when a driver module is loaded.*/
184 /* Used for tuning interrupt latency vs. overhead. */
185 static int intr_latency;
static int small_frames;

static int mtu;
static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
189 static int max_interrupt_work = 20;
191 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
192 The Starfire has a 512 element hash table based on the Ethernet CRC. */
193 static int multicast_filter_limit = 512;
194 /* Whether to do TCP/UDP checksums in hardware */
195 static int enable_hw_cksum = 1;
197 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * The ia64 doesn't allow for unaligned loads even of integers being
 * misaligned on a 2 byte boundary. Thus always force copying of
 * packets as the starfire doesn't allow for misaligned DMAs ;-(
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty.
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif
218 /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
225 /* Used to pass the media type, etc.
226 Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
227 The media type is usually passed in 'options[]'.
   These variables are deprecated, use ethtool instead. -Ion
*/
230 #define MAX_UNITS 8 /* More are supported, limit only on options */
231 static int options[MAX_UNITS] = {0, };
232 static int full_duplex[MAX_UNITS] = {0, };
234 /* Operational parameters that are set at compile time. */
236 /* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier. */
239 #define RX_RING_SIZE 256
240 #define TX_RING_SIZE 32
241 /* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
242 #define DONE_Q_SIZE 1024
243 /* All queues must be aligned on a 256-byte boundary */
244 #define QUEUE_ALIGN 256
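/*
 * netdev_open() rounds each queue's byte size up to a multiple of
 * QUEUE_ALIGN with the usual ((size + QUEUE_ALIGN - 1) / QUEUE_ALIGN) *
 * QUEUE_ALIGN arithmetic. Worked example: 1024 Tx completion entries of
 * 8 bytes each make 8192 bytes, already 256-aligned, so they stay 8192;
 * a hypothetical 260-byte queue would round up to 512.
 */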
246 #if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif
252 /* Operational parameters that usually are not changed. */
253 /* Time in jiffies before concluding the transmitter is hung. */
254 #define TX_TIMEOUT (2 * HZ)
/*
 * We need a much better method to determine if dma_addr_t is 64-bit.
 */
260 #if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
261 /* 64-bit dma_addr_t */
262 #define ADDR_64BITS /* This chip uses 64 bit addresses. */
263 #define cpu_to_dma(x) cpu_to_le64(x)
264 #define dma_to_cpu(x) le64_to_cpu(x)
265 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
266 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
267 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
268 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
269 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
270 #else /* 32-bit dma_addr_t */
271 #define cpu_to_dma(x) cpu_to_le32(x)
272 #define dma_to_cpu(x) le32_to_cpu(x)
273 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
274 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
275 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
276 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif
280 #define skb_first_frag_len(skb) skb_headlen(skb)
281 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
283 #ifdef HAVE_NETDEV_POLL
#define init_poll(dev) \
do { \
	dev->poll = &netdev_poll; \
	dev->weight = max_interrupt_work; \
} while (0)
#define netdev_rx(dev, ioaddr) \
do { \
	u32 intr_enable; \
	if (netif_rx_schedule_prep(dev)) { \
		__netif_rx_schedule(dev); \
		intr_enable = readl(ioaddr + IntrEnable); \
		intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
		writel(intr_enable, ioaddr + IntrEnable); \
		readl(ioaddr + IntrEnable); /* flush PCI posting buffers */ \
	} else { \
		/* Paranoia check */ \
		intr_enable = readl(ioaddr + IntrEnable); \
		if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \
			printk(KERN_INFO "%s: interrupt while in polling mode!\n", dev->name); \
			intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
			writel(intr_enable, ioaddr + IntrEnable); \
		} \
	} \
} while (0)
308 #define netdev_receive_skb(skb) netif_receive_skb(skb)
309 #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid)
310 static int netdev_poll(struct net_device *dev, int *budget);
311 #else /* not HAVE_NETDEV_POLL */
312 #define init_poll(dev)
313 #define netdev_receive_skb(skb) netif_rx(skb)
314 #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid)
#define netdev_rx(dev, ioaddr) \
do { \
	int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \
	__netdev_rx(dev, &quota); \
} while (0)
320 #endif /* not HAVE_NETDEV_POLL */
321 /* end of compatibility code */
324 /* These identify the driver base version and may not be removed. */
325 static char version[] __devinitdata =
326 KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
327 KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
329 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
330 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
331 MODULE_LICENSE("GPL");
332 MODULE_VERSION(DRV_VERSION);
334 module_param(max_interrupt_work, int, 0);
335 module_param(mtu, int, 0);
336 module_param(debug, int, 0);
337 module_param(rx_copybreak, int, 0);
338 module_param(intr_latency, int, 0);
339 module_param(small_frames, int, 0);
340 module_param_array(options, int, NULL, 0);
341 module_param_array(full_duplex, int, NULL, 0);
342 module_param(enable_hw_cksum, int, 0);
343 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
344 MODULE_PARM_DESC(mtu, "MTU (all boards)");
345 MODULE_PARM_DESC(debug, "Debug level (0-6)");
346 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
347 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
348 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
349 MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
350 MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
351 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
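/*
 * Example module load (hypothetical values, for illustration only):
 * capping interrupt latency at ~500us, bypassing mitigation for frames
 * up to 256 bytes, and keeping hardware checksumming on:
 *
 *	modprobe starfire intr_latency=500 small_frames=256 enable_hw_cksum=1
 *
 * The deprecated options[]/full_duplex[] parameters take one value per
 * board, e.g. options=0,0 full_duplex=1,1 for two adapters.
 */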
/*
				Theory of Operation

I. Board Compatibility
358 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
360 II. Board-specific settings
362 III. Driver operation
366 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
367 ring sizes are set fixed by the hardware, but may optionally be wrapped
368 earlier by the END bit in the descriptor.
369 This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
371 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
372 disables the queue layer priority ordering and we have no mechanism to
373 utilize the hardware two-level priority queue. When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
in the interrupt handler.
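As an illustrative sketch (not a literal quote of the code below), the
early-wrap marking amounts to:

	if (ring_entry == TX_RING_SIZE - 1)
		desc->status |= cpu_to_le32(TxRingWrap);	// chip returns to slot 0

with ring_entry/desc standing in for the driver's local state; the Rx
ring is terminated the same way with RxDescEndRing on the last rxaddr.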
377 IIIb/c. Transmit/Receive Structure
379 See the Adaptec manual for the many possible structures, and options for
380 each structure. There are far too many to document all of them here.
382 For transmit this driver uses type 0/1 transmit descriptors (depending
383 on the 32/64 bitness of the architecture), and relies on automatic
384 minimum-length padding. It does not use the completion queue
385 consumer index, but instead checks for non-zero status entries.
387 For receive this driver uses type 2/3 receive descriptors. The driver
388 allocates full frame size skbuffs for the Rx ring buffers, so all frames
389 should fit in a single descriptor. The driver does not use the completion
390 queue consumer index, but instead checks for non-zero status entries.
392 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
393 is allocated and the frame is copied to the new skbuff. When the incoming
394 frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.
398 A notable aspect of operation is that unaligned buffers are not permitted by
399 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
401 e.g. Alphas and IA64. For these architectures, the driver is forced to copy
402 the frame into a new skbuff unconditionally. Copied frames are put into the
403 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
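A minimal sketch of that receive-path decision, assuming rx_buf and
ring_skb stand in for the ring state (the real code is in __netdev_rx()):

	if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2))) {
		skb_reserve(skb, 2);	// 2 + 14-byte MAC header => aligned IP header
		memcpy(skb->data, rx_buf, pkt_len);	// copy out of the DMA buffer
	} else {
		skb = ring_skb;		// pass the DMA buffer up unmodified
	}

Since rx_copybreak defaults to PKT_BUF_SZ on ia64/alpha/sparc, those
architectures always take the copying branch.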
405 IIId. Synchronization
407 The driver runs as two independent, single-threaded flows of control. One
408 is the send-packet routine, which enforces single-threaded use by the
409 dev->tbusy flag. The other thread is the interrupt handler, which is single
410 threaded by the hardware and interrupt handling software.
412 The send packet thread has partial control over the Tx ring and the netif_queue
413 status. If the number of free Tx slots in the ring falls below a certain number
414 (currently hardcoded to 4), it signals the upper layer to stop the queue.
416 The interrupt handler has exclusive control over the Rx ring and records stats
417 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
418 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.
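In code form the two halves of this handshake reduce to:

	// start_tx(), producer side:
	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
		netif_stop_queue(dev);

	// intr_handler(), consumer side, after reaping completions:
	if (netif_queue_stopped(dev) &&
	    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE))
		netif_wake_queue(dev);

where 4 is the hardcoded slack mentioned above.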
426 The Adaptec Starfire manuals, available only from Adaptec.
427 http://www.scyld.com/expert/100mbps.html
428 http://www.scyld.com/expert/NWay.html
432 - StopOnPerr is broken, don't enable
433 - Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)
*/
enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
	CH_6915 = 0,
};

static struct pci_device_id starfire_pci_tbl[] = {
	{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
	{ 0, }
};
450 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
452 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static struct chip_info {
	const char *name;
	int drv_flags;
} netdrv_tbl[] __devinitdata = {
	{ "Adaptec Starfire 6915", CanHaveMII },
};
461 /* Offsets to the device registers.
462 Unlike software-only systems, device drivers interact with complex hardware.
463 It's not useful to define symbolic names for every register bit in the
464 device. The name can only partially document the semantics and make
465 the driver longer and more difficult to read.
466 In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
469 enum register_offsets {
470 PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
471 IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
472 MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
473 GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
474 TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
475 TxRingHiAddr=0x5009C, /* 64 bit address extension. */
476 TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
478 CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
479 RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
480 CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
481 RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
482 RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
483 TxMode=0x55000, VlanType=0x55064,
484 PerfFilterTable=0x56000, HashTable=0x56100,
485 TxGfpMem=0x58000, RxGfpMem=0x5a000,
/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
493 enum intr_status_bits {
494 IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
495 IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
496 IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
497 IntrTxComplQLow=0x200000, IntrPCI=0x100000,
498 IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
499 IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
500 IntrNormalSummary=0x8000, IntrTxDone=0x4000,
501 IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
502 IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
503 IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
504 IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
505 IntrNoTxCsum=0x20, IntrTxBadID=0x10,
506 IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
507 IntrTxGfp=0x02, IntrPCIPad=0x01,
509 IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
510 IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
511 IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
514 /* Bits in the RxFilterMode register. */
516 AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
517 AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
518 PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
522 /* Bits in the TxMode register */
524 MiiSoftReset=0x8000, MIILoopback=0x4000,
525 TxFlowEnable=0x0800, RxFlowEnable=0x0400,
526 PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
529 /* Bits in the TxDescCtrl register. */
531 TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
532 TxDescSpace128=0x30, TxDescSpace256=0x40,
533 TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
534 TxDescType3=0x03, TxDescType4=0x04,
535 TxNoDMACompletion=0x08,
536 TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
537 TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
538 TxDMABurstSizeShift=8,
541 /* Bits in the RxDescQCtrl register. */
543 RxBufferLenShift=16, RxMinDescrThreshShift=0,
544 RxPrefetchMode=0x8000, RxVariableQ=0x2000,
545 Rx2048QEntries=0x4000, Rx256QEntries=0,
546 RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
547 RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
548 RxDescSpace4=0x000, RxDescSpace8=0x100,
549 RxDescSpace16=0x200, RxDescSpace32=0x300,
550 RxDescSpace64=0x400, RxDescSpace128=0x500,
554 /* Bits in the RxDMACtrl register. */
555 enum rx_dmactrl_bits {
556 RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
557 RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
558 RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
559 RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
560 RxChecksumRejectTCPOnly=0x01000000,
561 RxCompletionQ2Enable=0x800000,
562 RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
563 RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
564 RxDMAQ2NonIP=0x400000,
565 RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
566 RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
570 /* Bits in the RxCompletionAddr register */
572 RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
573 RxComplProducerWrEn=0x40,
574 RxComplType0=0x00, RxComplType1=0x10,
575 RxComplType2=0x20, RxComplType3=0x30,
576 RxComplThreshShift=0,
579 /* Bits in the TxCompletionAddr register */
581 TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
582 TxComplProducerWrEn=0x40,
583 TxComplIntrStatus=0x20,
584 CommonQueueMode=0x10,
585 TxComplThreshShift=0,
588 /* Bits in the GenCtrl register */
590 RxEnable=0x05, TxEnable=0x0a,
591 RxGFPEnable=0x10, TxGFPEnable=0x20,
594 /* Bits in the IntrTimerCtrl register */
595 enum intr_ctrl_bits {
596 Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
597 SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
598 IntrLatencyMask=0x1f,
601 /* The Rx and Tx buffer descriptors. */
602 struct starfire_rx_desc {
606 RxDescValid=1, RxDescEndRing=2,
609 /* Completion queue entry. */
610 struct short_rx_done_desc {
611 u32 status; /* Low 16 bits is length. */
613 struct basic_rx_done_desc {
614 u32 status; /* Low 16 bits is length. */
618 struct csum_rx_done_desc {
619 u32 status; /* Low 16 bits is length. */
620 u16 csum; /* Partial checksum */
623 struct full_rx_done_desc {
624 u32 status; /* Low 16 bits is length. */
628 u16 csum; /* partial checksum */
631 /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
634 #define RxComplType RxComplType3
635 #else /* not VLAN_SUPPORT */
636 typedef struct csum_rx_done_desc rx_done_desc;
637 #define RxComplType RxComplType2
638 #endif /* not VLAN_SUPPORT */
641 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
644 /* Type 1 Tx descriptor. */
645 struct starfire_tx_desc_1 {
646 u32 status; /* Upper bits are status, lower 16 length. */
650 /* Type 2 Tx descriptor. */
651 struct starfire_tx_desc_2 {
652 u32 status; /* Upper bits are status, lower 16 length. */
#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
659 #define TX_DESC_TYPE TxDescType2
660 #else /* not ADDR_64BITS */
661 typedef struct starfire_tx_desc_1 starfire_tx_desc;
662 #define TX_DESC_TYPE TxDescType1
663 #endif /* not ADDR_64BITS */
664 #define TX_DESC_SPACING TxDescSpaceUnlim
668 TxCRCEn=0x01000000, TxDescIntr=0x08000000,
669 TxRingWrap=0x04000000, TxCalTCP=0x02000000,
671 struct tx_done_desc {
672 u32 status; /* timestamp, index. */
674 u32 intrstatus; /* interrupt status */
#define PHY_CNT		2
struct rx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};
struct tx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
	unsigned int used_slots;
};
689 struct netdev_private {
690 /* Descriptor rings first for alignment. */
691 struct starfire_rx_desc *rx_ring;
692 starfire_tx_desc *tx_ring;
693 dma_addr_t rx_ring_dma;
694 dma_addr_t tx_ring_dma;
695 /* The addresses of rx/tx-in-place skbuffs. */
696 struct rx_ring_info rx_info[RX_RING_SIZE];
697 struct tx_ring_info tx_info[TX_RING_SIZE];
698 /* Pointers to completion queues (full pages). */
699 rx_done_desc *rx_done_q;
700 dma_addr_t rx_done_q_dma;
701 unsigned int rx_done;
702 struct tx_done_desc *tx_done_q;
703 dma_addr_t tx_done_q_dma;
704 unsigned int tx_done;
705 struct net_device_stats stats;
706 struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
	struct vlan_group *vlgrp;
#endif /* VLAN_SUPPORT */
	void *queue_mem;
	dma_addr_t queue_mem_dma;
	size_t queue_mem_size;
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
716 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
717 unsigned int cur_tx, dirty_tx, reap_tx;
718 unsigned int rx_buf_sz; /* Based on MTU+slack. */
719 /* These values keep track of the transceiver/media in use. */
	int speed100;			/* Set if speed == 100MBit. */
	u32 tx_mode;
	u32 intr_timer_ctrl;
	u8 tx_threshold;
724 /* MII transceiver section. */
725 struct mii_if_info mii_if; /* MII lib hooks/info */
726 int phy_cnt; /* MII device addresses. */
	unsigned char phys[PHY_CNT];	/* MII device addresses. */
	void __iomem *base;
};
732 static int mdio_read(struct net_device *dev, int phy_id, int location);
733 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
734 static int netdev_open(struct net_device *dev);
735 static void check_duplex(struct net_device *dev);
736 static void tx_timeout(struct net_device *dev);
737 static void init_ring(struct net_device *dev);
738 static int start_tx(struct sk_buff *skb, struct net_device *dev);
739 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
740 static void netdev_error(struct net_device *dev, int intr_status);
741 static int __netdev_rx(struct net_device *dev, int *quota);
742 static void refill_rx_ring(struct net_device *dev);
744 static void set_rx_mode(struct net_device *dev);
745 static struct net_device_stats *get_stats(struct net_device *dev);
746 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
747 static int netdev_close(struct net_device *dev);
748 static void netdev_media_change(struct net_device *dev);
749 static struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
755 struct netdev_private *np = netdev_priv(dev);
757 spin_lock(&np->lock);
759 printk("%s: Setting vlgrp to %p\n", dev->name, grp);
762 spin_unlock(&np->lock);
765 static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
767 struct netdev_private *np = netdev_priv(dev);
769 spin_lock(&np->lock);
771 printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
773 spin_unlock(&np->lock);
776 static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
778 struct netdev_private *np = netdev_priv(dev);
780 spin_lock(&np->lock);
782 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
784 np->vlgrp->vlan_devices[vid] = NULL;
786 spin_unlock(&np->lock);
788 #endif /* VLAN_SUPPORT */
791 static int __devinit starfire_init_one(struct pci_dev *pdev,
792 const struct pci_device_id *ent)
794 struct netdev_private *np;
795 int i, irq, option, chip_idx = ent->driver_data;
796 struct net_device *dev;
797 static int card_idx = -1;
800 int drv_flags, io_size;
803 /* when built into the kernel, we only print version if device is found */
805 static int printed_version;
806 if (!printed_version++)
812 if (pci_enable_device (pdev))
815 ioaddr = pci_resource_start(pdev, 0);
816 io_size = pci_resource_len(pdev, 0);
817 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
818 printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
822 dev = alloc_etherdev(sizeof(*np));
824 printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
827 SET_MODULE_OWNER(dev);
828 SET_NETDEV_DEV(dev, &pdev->dev);
832 if (pci_request_regions (pdev, DRV_NAME)) {
833 printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
834 goto err_out_free_netdev;
837 /* ioremap is borken in Linux-2.2.x/sparc64 */
838 base = ioremap(ioaddr, io_size);
840 printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
841 card_idx, io_size, ioaddr);
842 goto err_out_free_res;
845 pci_set_master(pdev);
847 /* enable MWI -- it vastly improves Rx performance on sparc64 */
#ifdef ZEROCOPY
	/* Starfire can do TCP/UDP checksumming */
	if (enable_hw_cksum)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
854 #endif /* ZEROCOPY */
#ifdef VLAN_SUPPORT
	dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
857 dev->vlan_rx_register = netdev_vlan_rx_register;
858 dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
859 dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
	dev->features |= NETIF_F_HIGHDMA;
863 #endif /* ADDR_64BITS */
865 /* Serial EEPROM reads are hidden by the hardware. */
866 for (i = 0; i < 6; i++)
867 dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
869 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
	if (debug > 4)
		for (i = 0; i < 0x20; i++)
			printk("%2.2x%s",
			       (unsigned int)readb(base + EEPROMCtrl + i),
			       i % 16 != 15 ? " " : "\n");
#endif
877 /* Issue soft reset */
878 writel(MiiSoftReset, base + TxMode);
880 writel(0, base + TxMode);
882 /* Reset the chip to erase previous misconfiguration. */
883 writel(1, base + PCIDeviceConfig);
	while (--boguscnt > 0) {
		udelay(10);
		if ((readl(base + PCIDeviceConfig) & 1) == 0)
			break;
	}
	if (boguscnt == 0)
		printk("%s: chipset reset never completed!\n", dev->name);
	/* wait a little longer */
	udelay(1000);
895 dev->base_addr = (unsigned long)base;
	np = netdev_priv(dev);
	np->base = base;

	spin_lock_init(&np->lock);
901 pci_set_drvdata(pdev, dev);
	np->pci_dev = pdev;

	np->mii_if.dev = dev;
906 np->mii_if.mdio_read = mdio_read;
907 np->mii_if.mdio_write = mdio_write;
908 np->mii_if.phy_id_mask = 0x1f;
909 np->mii_if.reg_num_mask = 0x1f;
911 drv_flags = netdrv_tbl[chip_idx].drv_flags;
	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
	if (dev->mem_start)
		option = dev->mem_start;
917 /* The lower four bits are the media type. */
	if (option & 0x200)
		np->mii_if.full_duplex = 1;
921 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
922 np->mii_if.full_duplex = 1;
924 if (np->mii_if.full_duplex)
925 np->mii_if.force_media = 1;
927 np->mii_if.force_media = 0;
930 /* timer resolution is 128 * 0.8us */
931 np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
932 Timer10X | EnableIntrMasking;
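	/*
	 * Worked example: one timer tick is 128 * 0.8us = 102.4us, and
	 * (intr_latency * 10) / 1024 approximates intr_latency / 102.4.
	 * intr_latency=500 thus gives 5000 / 1024 = 4 ticks, i.e. at most
	 * ~410us of interrupt holdoff; IntrLatencyMask (0x1f) caps the
	 * field at 31 ticks, roughly 3.2ms.
	 */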
	if (small_frames > 0) {
		np->intr_timer_ctrl |= SmallFrameBypass;
		switch (small_frames) {
		case 64:
			np->intr_timer_ctrl |= SmallFrame64;
			break;
		case 128:
			np->intr_timer_ctrl |= SmallFrame128;
			break;
		case 256:
			np->intr_timer_ctrl |= SmallFrame256;
			break;
		default:
			np->intr_timer_ctrl |= SmallFrame512;
			if (small_frames > 512)
				printk("Adjusting small_frames down to 512\n");
			break;
		}
	}
954 /* The chip-specific entries in the device structure. */
955 dev->open = &netdev_open;
956 dev->hard_start_xmit = &start_tx;
957 dev->tx_timeout = tx_timeout;
958 dev->watchdog_timeo = TX_TIMEOUT;
960 dev->stop = &netdev_close;
961 dev->get_stats = &get_stats;
962 dev->set_multicast_list = &set_rx_mode;
963 dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
969 if (register_netdev(dev))
970 goto err_out_cleardev;
972 printk(KERN_INFO "%s: %s at %p, ",
973 dev->name, netdrv_tbl[chip_idx].name, base);
974 for (i = 0; i < 5; i++)
975 printk("%2.2x:", dev->dev_addr[i]);
976 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
978 if (drv_flags & CanHaveMII) {
979 int phy, phy_idx = 0;
981 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
982 mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
			while (--boguscnt > 0)
				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
					break;
			if (boguscnt == 0)
				printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
992 mii_status = mdio_read(dev, phy, MII_BMSR);
993 if (mii_status != 0) {
994 np->phys[phy_idx++] = phy;
995 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
996 printk(KERN_INFO "%s: MII PHY found at address %d, status "
997 "%#4.4x advertising %#4.4x.\n",
998 dev->name, phy, mii_status, np->mii_if.advertising);
999 /* there can be only one PHY on-board */
1003 np->phy_cnt = phy_idx;
1004 if (np->phy_cnt > 0)
1005 np->mii_if.phy_id = np->phys[0];
	else
		memset(&np->mii_if, 0, sizeof(np->mii_if));
1010 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
1011 dev->name, enable_hw_cksum ? "enabled" : "disabled");
1015 pci_set_drvdata(pdev, NULL);
1018 pci_release_regions (pdev);
1019 err_out_free_netdev:
1025 /* Read the MII Management Data I/O (MDIO) interfaces. */
1026 static int mdio_read(struct net_device *dev, int phy_id, int location)
1028 struct netdev_private *np = netdev_priv(dev);
1029 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
1030 int result, boguscnt=1000;
1031 /* ??? Should we add a busy-wait here? */
	do {
		result = readl(mdio_addr);
	} while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
	if ((result & 0xffff) == 0xffff)
		return 0;
	return result & 0xffff;
1043 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
1045 struct netdev_private *np = netdev_priv(dev);
1046 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
1047 writel(value, mdio_addr);
1048 /* The busy-wait will occur before a read. */
1052 static int netdev_open(struct net_device *dev)
1054 struct netdev_private *np = netdev_priv(dev);
1055 void __iomem *ioaddr = np->base;
1057 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
1059 /* Do we ever need to reset the chip??? */
1061 retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
1065 /* Disable the Rx and Tx, and reset the chip. */
1066 writel(0, ioaddr + GenCtrl);
1067 writel(1, ioaddr + PCIDeviceConfig);
1069 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
1070 dev->name, dev->irq);
1072 /* Allocate the various queues. */
1073 if (np->queue_mem == 0) {
1074 tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1075 rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1076 tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1077 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
1078 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
1079 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
1080 if (np->queue_mem == 0)
1083 np->tx_done_q = np->queue_mem;
1084 np->tx_done_q_dma = np->queue_mem_dma;
1085 np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
1086 np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
1087 np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
1088 np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
1089 np->rx_ring = (void *) np->tx_ring + tx_ring_size;
1090 np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
1093 /* Start with no carrier, it gets adjusted later */
1094 netif_carrier_off(dev);
1096 /* Set the size of the Rx buffers. */
1097 writel((np->rx_buf_sz << RxBufferLenShift) |
1098 (0 << RxMinDescrThreshShift) |
1099 RxPrefetchMode | RxVariableQ |
	       RX_Q_ENTRIES |
	       RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
	       RxDescSpace4,
1103 ioaddr + RxDescQCtrl);
1105 /* Set up the Rx DMA controller. */
1106 writel(RxChecksumIgnore |
1107 (0 << RxEarlyIntThreshShift) |
1108 (6 << RxHighPrioThreshShift) |
1109 ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
1110 ioaddr + RxDMACtrl);
1112 /* Set Tx descriptor */
1113 writel((2 << TxHiPriFIFOThreshShift) |
1114 (0 << TxPadLenShift) |
1115 ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
1116 TX_DESC_Q_ADDR_SIZE |
1117 TX_DESC_SPACING | TX_DESC_TYPE,
1118 ioaddr + TxDescCtrl);
1120 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
1121 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
1122 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
1123 writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
1124 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
1126 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
1127 writel(np->rx_done_q_dma |
1129 (0 << RxComplThreshShift),
1130 ioaddr + RxCompletionAddr);
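	/*
	 * Note on the (x >> 16) >> 16 idiom used for the high-address
	 * registers above: it yields the upper 32 bits of a dma_addr_t
	 * without provoking the "shift count >= width of type" warning
	 * (and undefined behavior) that x >> 32 would cause when
	 * dma_addr_t is only 32 bits wide; in that case it is simply 0.
	 */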
1133 printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
1135 /* Fill both the Tx SA register and the Rx perfect filter. */
1136 for (i = 0; i < 6; i++)
1137 writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
	/* The first entry is special because it bypasses the VLAN filter. */
1140 writew(0, ioaddr + PerfFilterTable);
1141 writew(0, ioaddr + PerfFilterTable + 4);
1142 writew(0, ioaddr + PerfFilterTable + 8);
1143 for (i = 1; i < 16; i++) {
1144 u16 *eaddrs = (u16 *)dev->dev_addr;
1145 void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
1146 writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
1147 writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
1148 writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
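	/*
	 * The filter RAM takes the station address as three 16-bit words
	 * in reverse word order: eaddrs[2] covers dev_addr bytes 4-5,
	 * eaddrs[1] bytes 2-3, eaddrs[0] bytes 0-1, each passed through
	 * cpu_to_be16 so the chip compares them in wire order. The
	 * += 4 / += 4 / += 8 steps give each entry a 16-byte stride.
	 */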
1151 /* Initialize other registers. */
1152 /* Configure the PCI bus bursts and FIFO thresholds. */
1153 np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
1154 writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
1156 writel(np->tx_mode, ioaddr + TxMode);
1157 np->tx_threshold = 4;
1158 writel(np->tx_threshold, ioaddr + TxThreshold);
1160 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1162 netif_start_queue(dev);
1165 printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
1168 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1171 /* Enable GPIO interrupts on link change */
1172 writel(0x0f00ff00, ioaddr + GPIOCtrl);
1174 /* Set the interrupt mask */
1175 writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
1176 IntrTxDMADone | IntrStatsMax | IntrLinkChange |
1177 IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
1178 ioaddr + IntrEnable);
1179 /* Enable PCI interrupts. */
1180 writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1181 ioaddr + PCIDeviceConfig);
#ifdef VLAN_SUPPORT
	/* Set VLAN type to 802.1q */
1185 writel(ETH_P_8021Q, ioaddr + VlanType);
1186 #endif /* VLAN_SUPPORT */
1188 /* Load Rx/Tx firmware into the frame processors */
1189 for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
1190 writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
1191 for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
1192 writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
1193 if (enable_hw_cksum)
1194 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1195 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
	else
		/* Enable the Rx and Tx units only. */
		writel(TxEnable|RxEnable, ioaddr + GenCtrl);
	if (debug > 1)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	return 0;
}
1208 static void check_duplex(struct net_device *dev)
1210 struct netdev_private *np = netdev_priv(dev);
1212 int silly_count = 1000;
1214 mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1215 mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
	while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
		udelay(10);
	if (!silly_count)
		printk("%s: MII reset failed!\n", dev->name);
1224 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1226 if (!np->mii_if.force_media) {
		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
		if (np->speed100)
			reg0 |= BMCR_SPEED100;
		if (np->mii_if.full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");
	}
	mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1243 static void tx_timeout(struct net_device *dev)
1245 struct netdev_private *np = netdev_priv(dev);
1246 void __iomem *ioaddr = np->base;
1249 printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1250 "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1252 /* Perhaps we should reinitialize the hardware here. */
1255 * Stop and restart the interface.
1256 * Cheat and increase the debug level temporarily.
1264 /* Trigger an immediate transmit demand. */
1266 dev->trans_start = jiffies;
1267 np->stats.tx_errors++;
1268 netif_wake_queue(dev);
1272 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1273 static void init_ring(struct net_device *dev)
1275 struct netdev_private *np = netdev_priv(dev);
1278 np->cur_rx = np->cur_tx = np->reap_tx = 0;
1279 np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1281 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1283 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1284 for (i = 0; i < RX_RING_SIZE; i++) {
1285 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_info[i].skb = skb;
		if (skb == NULL)
			break;
		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1290 skb->dev = dev; /* Mark as being used by this device. */
1291 /* Grrr, we cannot offset to correctly align the IP header. */
1292 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1294 writew(i - 1, np->base + RxDescQIdx);
1295 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1297 /* Clear the remainder of the Rx buffer ring. */
1298 for ( ; i < RX_RING_SIZE; i++) {
1299 np->rx_ring[i].rxaddr = 0;
1300 np->rx_info[i].skb = NULL;
1301 np->rx_info[i].mapping = 0;
1303 /* Mark the last entry as wrapping the ring. */
1304 np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1306 /* Clear the completion rings. */
1307 for (i = 0; i < DONE_Q_SIZE; i++) {
1308 np->rx_done_q[i].status = 0;
1309 np->tx_done_q[i].status = 0;
1312 for (i = 0; i < TX_RING_SIZE; i++)
1313 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1319 static int start_tx(struct sk_buff *skb, struct net_device *dev)
1321 struct netdev_private *np = netdev_priv(dev);
	/*
	 * be cautious here, wrapping the queue has weird semantics
	 * and we may not have enough slots even when it seems we do.
	 */
1330 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1331 netif_stop_queue(dev);
1335 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
	if (skb->ip_summed == CHECKSUM_HW) {
		int has_bad_length = 0;
		if (skb_first_frag_len(skb) == 1)
			has_bad_length = 1;
		else
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				if (skb_shinfo(skb)->frags[i].size == 1) {
					has_bad_length = 1;
					break;
				}
		if (has_bad_length)
			skb_checksum_help(skb, 0);
	}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1354 entry = np->cur_tx % TX_RING_SIZE;
1355 for (i = 0; i < skb_num_frags(skb); i++) {
1360 np->tx_info[entry].skb = skb;
1362 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1363 status |= TxRingWrap;
1367 status |= TxDescIntr;
1370 if (skb->ip_summed == CHECKSUM_HW) {
1372 np->stats.tx_compressed++;
1374 status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1376 np->tx_info[entry].mapping =
1377 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1379 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1380 status |= this_frag->size;
1381 np->tx_info[entry].mapping =
1382 pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
1385 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1386 np->tx_ring[entry].status = cpu_to_le32(status);
1388 printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1389 dev->name, np->cur_tx, np->dirty_tx,
		if (wrap_ring) {
			np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry = 0;
		} else {
			np->tx_info[entry].used_slots = 1;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry++;
		}
1400 /* scavenge the tx descriptors twice per TX_RING_SIZE */
1401 if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1405 /* Non-x86: explicitly flush descriptor cache lines here. */
	/* Ensure all descriptors are written back before the transmit is
	   initiated. */
	wmb();
1410 /* Update the producer index. */
1411 writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
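	/* The producer index register appears to count in 8-byte units,
	   hence the sizeof(starfire_tx_desc) / 8 scaling: a factor of 1
	   for 8-byte type 1 descriptors, 2 for 16-byte type 2 ones. */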
1413 /* 4 is arbitrary, but should be ok */
1414 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1415 netif_stop_queue(dev);
1417 dev->trans_start = jiffies;
1423 /* The interrupt handler does all of the Rx thread work and cleans up
1424 after the Tx thread. */
1425 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1427 struct net_device *dev = dev_instance;
1428 struct netdev_private *np = netdev_priv(dev);
1429 void __iomem *ioaddr = np->base;
1430 int boguscnt = max_interrupt_work;
1436 u32 intr_status = readl(ioaddr + IntrClear);
1439 printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1440 dev->name, intr_status);
		if (intr_status == 0 || intr_status == (u32) -1)
			break;

		handled = 1;
1447 if (intr_status & (IntrRxDone | IntrRxEmpty))
1448 netdev_rx(dev, ioaddr);
1450 /* Scavenge the skbuff list based on the Tx-done queue.
1451 There are redundant checks here that may be cleaned up
1452 after the driver has proven to be reliable. */
1453 consumer = readl(ioaddr + TxConsumerIdx);
1455 printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1456 dev->name, consumer);
1458 while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1460 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1461 dev->name, np->dirty_tx, np->tx_done, tx_status);
1462 if ((tx_status & 0xe0000000) == 0xa0000000) {
1463 np->stats.tx_packets++;
1464 } else if ((tx_status & 0xe0000000) == 0x80000000) {
1465 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1466 struct sk_buff *skb = np->tx_info[entry].skb;
1467 np->tx_info[entry].skb = NULL;
1468 pci_unmap_single(np->pci_dev,
1469 np->tx_info[entry].mapping,
1470 skb_first_frag_len(skb),
1472 np->tx_info[entry].mapping = 0;
1473 np->dirty_tx += np->tx_info[entry].used_slots;
1474 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1477 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1478 pci_unmap_single(np->pci_dev,
1479 np->tx_info[entry].mapping,
1480 skb_shinfo(skb)->frags[i].size,
1487 dev_kfree_skb_irq(skb);
1489 np->tx_done_q[np->tx_done].status = 0;
1490 np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1492 writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1494 if (netif_queue_stopped(dev) &&
1495 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1496 /* The ring is no longer full, wake the queue. */
1497 netif_wake_queue(dev);
1500 /* Stats overflow */
1501 if (intr_status & IntrStatsMax)
1504 /* Media change interrupt. */
1505 if (intr_status & IntrLinkChange)
1506 netdev_media_change(dev);
1508 /* Abnormal error summary/uncommon events handlers. */
1509 if (intr_status & IntrAbnormalSummary)
1510 netdev_error(dev, intr_status);
1512 if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
1522 printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1523 dev->name, (int) readl(ioaddr + IntrStatus));
1524 return IRQ_RETVAL(handled);
1528 /* This routine is logically part of the interrupt/poll handler, but separated
1529 for clarity, code sharing between NAPI/non-NAPI, and better register allocation. */
1530 static int __netdev_rx(struct net_device *dev, int *quota)
1532 struct netdev_private *np = netdev_priv(dev);
1536 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1537 while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1538 struct sk_buff *skb;
1541 rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1544 printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1545 if (!(desc_status & RxOK)) {
1546 /* There was an error. */
1548 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1549 np->stats.rx_errors++;
1550 if (desc_status & RxFIFOErr)
1551 np->stats.rx_fifo_errors++;
1555 if (*quota <= 0) { /* out of rx quota */
1561 pkt_len = desc_status; /* Implicitly Truncate */
1562 entry = (desc_status >> 16) & 0x7ff;
1565 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1566 /* Check if the packet is long enough to accept without copying
1567 to a minimally-sized skbuff. */
1568 if (pkt_len < rx_copybreak
1569 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1571 skb_reserve(skb, 2); /* 16 byte align the IP header */
1572 pci_dma_sync_single_for_cpu(np->pci_dev,
1573 np->rx_info[entry].mapping,
1574 pkt_len, PCI_DMA_FROMDEVICE);
1575 eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
1576 pci_dma_sync_single_for_device(np->pci_dev,
1577 np->rx_info[entry].mapping,
1578 pkt_len, PCI_DMA_FROMDEVICE);
1579 skb_put(skb, pkt_len);
		} else {
			pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb = np->rx_info[entry].skb;
			skb_put(skb, pkt_len);
			np->rx_info[entry].skb = NULL;
			np->rx_info[entry].mapping = 0;
		}
1587 #ifndef final_version /* Remove after testing. */
1588 /* You will want this info for the initial debug. */
1590 printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1591 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n",
1592 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1593 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1594 skb->data[8], skb->data[9], skb->data[10],
1595 skb->data[11], skb->data[12], skb->data[13]);
1598 skb->protocol = eth_type_trans(skb, dev);
1601 printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1603 if (le16_to_cpu(desc->status2) & 0x0100) {
1604 skb->ip_summed = CHECKSUM_UNNECESSARY;
1605 np->stats.rx_compressed++;
		/*
		 * This feature doesn't seem to be working, at least
		 * with the two firmware versions I have. If the GFP sees
		 * an IP fragment, it either ignores it completely, or reports
		 * "bad checksum" on it.
		 *
		 * Maybe I missed something -- corrections are welcome.
		 * Until then, the printk stays. :-) -Ion
		 */
1616 else if (le16_to_cpu(desc->status2) & 0x0040) {
1617 skb->ip_summed = CHECKSUM_HW;
1618 skb->csum = le16_to_cpu(desc->csum);
1619 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
#ifdef VLAN_SUPPORT
		if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
1624 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid));
1625 /* vlan_netdev_receive_skb() expects a packet with the VLAN tag stripped out */
1626 vlan_netdev_receive_skb(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK);
		} else
#endif /* VLAN_SUPPORT */
1629 netdev_receive_skb(skb);
1630 dev->last_rx = jiffies;
1631 np->stats.rx_packets++;
1636 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1638 writew(np->rx_done, np->base + CompletionQConsumerIdx);
1641 refill_rx_ring(dev);
1643 printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1644 retcode, np->rx_done, desc_status);
1649 #ifdef HAVE_NETDEV_POLL
1650 static int netdev_poll(struct net_device *dev, int *budget)
1653 struct netdev_private *np = netdev_priv(dev);
1654 void __iomem *ioaddr = np->base;
1655 int retcode = 0, quota = dev->quota;
	do {
		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);

		retcode = __netdev_rx(dev, &quota);
		*budget -= (dev->quota - quota);
		dev->quota = quota;
		if (retcode)
			break;

		intr_status = readl(ioaddr + IntrStatus);
	} while (intr_status & (IntrRxDone | IntrRxEmpty));
1669 netif_rx_complete(dev);
1670 intr_status = readl(ioaddr + IntrEnable);
1671 intr_status |= IntrRxDone | IntrRxEmpty;
1672 writel(intr_status, ioaddr + IntrEnable);
1676 printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", retcode);
	/* Restart Rx engine if stopped. */
	return retcode;
}
1681 #endif /* HAVE_NETDEV_POLL */
1684 static void refill_rx_ring(struct net_device *dev)
1686 struct netdev_private *np = netdev_priv(dev);
1687 struct sk_buff *skb;
1690 /* Refill the Rx ring buffers. */
1691 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1692 entry = np->dirty_rx % RX_RING_SIZE;
1693 if (np->rx_info[entry].skb == NULL) {
1694 skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_info[entry].skb = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
1698 np->rx_info[entry].mapping =
1699 pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1700 skb->dev = dev; /* Mark as being used by this device. */
1701 np->rx_ring[entry].rxaddr =
1702 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1704 if (entry == RX_RING_SIZE - 1)
1705 np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1708 writew(entry, np->base + RxDescQIdx);
1712 static void netdev_media_change(struct net_device *dev)
1714 struct netdev_private *np = netdev_priv(dev);
1715 void __iomem *ioaddr = np->base;
1716 u16 reg0, reg1, reg4, reg5;
1718 u32 new_intr_timer_ctrl;
1720 /* reset status first */
1721 mdio_read(dev, np->phys[0], MII_BMCR);
1722 mdio_read(dev, np->phys[0], MII_BMSR);
1724 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1725 reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1727 if (reg1 & BMSR_LSTATUS) {
1729 if (reg0 & BMCR_ANENABLE) {
1730 /* autonegotiation is enabled */
1731 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1732 reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1733 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1735 np->mii_if.full_duplex = 1;
1736 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1738 np->mii_if.full_duplex = 0;
1739 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1741 np->mii_if.full_duplex = 1;
1744 np->mii_if.full_duplex = 0;
1747 /* autonegotiation is disabled */
			if (reg0 & BMCR_SPEED100)
				np->speed100 = 1;
			else
				np->speed100 = 0;
1752 if (reg0 & BMCR_FULLDPLX)
1753 np->mii_if.full_duplex = 1;
1755 np->mii_if.full_duplex = 0;
1757 netif_carrier_on(dev);
1758 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1760 np->speed100 ? "100" : "10",
1761 np->mii_if.full_duplex ? "full" : "half");
1763 new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
1764 if (np->mii_if.full_duplex)
1765 new_tx_mode |= FullDuplex;
1766 if (np->tx_mode != new_tx_mode) {
1767 np->tx_mode = new_tx_mode;
1768 writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1770 writel(np->tx_mode, ioaddr + TxMode);
1773 new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1775 new_intr_timer_ctrl |= Timer10X;
1776 if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1777 np->intr_timer_ctrl = new_intr_timer_ctrl;
1778 writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1781 netif_carrier_off(dev);
1782 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1787 static void netdev_error(struct net_device *dev, int intr_status)
1789 struct netdev_private *np = netdev_priv(dev);
1791 /* Came close to underrunning the Tx FIFO, increase threshold. */
1792 if (intr_status & IntrTxDataLow) {
1793 if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1794 writel(++np->tx_threshold, np->base + TxThreshold);
1795 printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1796 dev->name, np->tx_threshold * 16);
		} else
			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
	}
1800 if (intr_status & IntrRxGFPDead) {
1801 np->stats.rx_fifo_errors++;
1802 np->stats.rx_errors++;
1804 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1805 np->stats.tx_fifo_errors++;
1806 np->stats.tx_errors++;
1808 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1809 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1810 dev->name, intr_status);
1814 static struct net_device_stats *get_stats(struct net_device *dev)
1816 struct netdev_private *np = netdev_priv(dev);
1817 void __iomem *ioaddr = np->base;
1819 /* This adapter architecture needs no SMP locks. */
1820 np->stats.tx_bytes = readl(ioaddr + 0x57010);
1821 np->stats.rx_bytes = readl(ioaddr + 0x57044);
1822 np->stats.tx_packets = readl(ioaddr + 0x57000);
1823 np->stats.tx_aborted_errors =
1824 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1825 np->stats.tx_window_errors = readl(ioaddr + 0x57018);
1826 np->stats.collisions =
1827 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
	/* The chip only needs to report frames it silently dropped. */
1830 np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1831 writew(0, ioaddr + RxDMAStatus);
1832 np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1833 np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1834 np->stats.rx_length_errors = readl(ioaddr + 0x57058);
	np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &np->stats;
}
1841 static void set_rx_mode(struct net_device *dev)
1843 struct netdev_private *np = netdev_priv(dev);
1844 void __iomem *ioaddr = np->base;
1845 u32 rx_mode = MinVLANPrio;
1846 struct dev_mc_list *mclist;
#ifdef VLAN_SUPPORT
	rx_mode |= VlanMode;
1853 void __iomem *filter_addr = ioaddr + HashTable + 8;
1854 for (i = 0; i < VLAN_VID_MASK; i++) {
1855 if (np->vlgrp->vlan_devices[i]) {
				if (vlan_count >= 32)
					break;
				writew(cpu_to_be16(i), filter_addr);
1863 if (i == VLAN_VID_MASK) {
1864 rx_mode |= PerfectFilterVlan;
1865 while (vlan_count < 32) {
				writew(0, filter_addr);
				filter_addr += 16;
				vlan_count++;
1872 #endif /* VLAN_SUPPORT */
1874 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1875 rx_mode |= AcceptAll;
1876 } else if ((dev->mc_count > multicast_filter_limit)
1877 || (dev->flags & IFF_ALLMULTI)) {
1878 /* Too many to match, or accept all multicasts. */
1879 rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1880 } else if (dev->mc_count <= 14) {
1881 /* Use the 16 element perfect filter, skip first two entries. */
1882 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1884 for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
1885 i++, mclist = mclist->next) {
1886 eaddrs = (u16 *)mclist->dmi_addr;
1887 writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
1888 writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
1889 writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
		eaddrs = (u16 *)dev->dev_addr;
		while (i++ < 16) {
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
		}
1897 rx_mode |= AcceptBroadcast|PerfectFilter;
1899 /* Must use a multicast hash table. */
1900 void __iomem *filter_addr;
1902 u16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1904 memset(mc_filter, 0, sizeof(mc_filter));
1905 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1906 i++, mclist = mclist->next) {
1907 /* The chip uses the upper 9 CRC bits
1908 as index into the hash table */
1909 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
1910 __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
1912 *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1914 /* Clear the perfect filter list, skip first two entries. */
1915 filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1916 eaddrs = (u16 *)dev->dev_addr;
1917 for (i = 2; i < 16; i++) {
1918 writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
1919 writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
1920 writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
1922 for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
1923 writew(mc_filter[i], filter_addr);
1924 rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	writel(rx_mode, ioaddr + RxFilterMode);
}
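/*
 * Worked example for the hash branch in set_rx_mode() above: bit_nr is
 * the top 9 bits of the little-endian CRC-32, so it ranges 0..511. For
 * bit_nr = 300, (bit_nr >> 4) & ~1 = 18, so fptr points at the 32-bit
 * pair starting at mc_filter[18], and 300 & 31 = 12 picks the bit
 * within that pair.
 */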
1929 static int check_if_running(struct net_device *dev)
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}
1936 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1938 struct netdev_private *np = netdev_priv(dev);
1939 strcpy(info->driver, DRV_NAME);
1940 strcpy(info->version, DRV_VERSION);
1941 strcpy(info->bus_info, pci_name(np->pci_dev));
1944 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1946 struct netdev_private *np = netdev_priv(dev);
1947 spin_lock_irq(&np->lock);
1948 mii_ethtool_gset(&np->mii_if, ecmd);
1949 spin_unlock_irq(&np->lock);
1953 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1955 struct netdev_private *np = netdev_priv(dev);
1957 spin_lock_irq(&np->lock);
1958 res = mii_ethtool_sset(&np->mii_if, ecmd);
1959 spin_unlock_irq(&np->lock);
1964 static int nway_reset(struct net_device *dev)
1966 struct netdev_private *np = netdev_priv(dev);
1967 return mii_nway_restart(&np->mii_if);
1970 static u32 get_link(struct net_device *dev)
1972 struct netdev_private *np = netdev_priv(dev);
1973 return mii_link_ok(&np->mii_if);
1976 static u32 get_msglevel(struct net_device *dev)
1981 static void set_msglevel(struct net_device *dev, u32 val)
1986 static struct ethtool_ops ethtool_ops = {
1987 .begin = check_if_running,
1988 .get_drvinfo = get_drvinfo,
1989 .get_settings = get_settings,
1990 .set_settings = set_settings,
1991 .nway_reset = nway_reset,
1992 .get_link = get_link,
1993 .get_msglevel = get_msglevel,
1994 .set_msglevel = set_msglevel,
1997 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1999 struct netdev_private *np = netdev_priv(dev);
2000 struct mii_ioctl_data *data = if_mii(rq);
2003 if (!netif_running(dev))
2006 spin_lock_irq(&np->lock);
2007 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
2008 spin_unlock_irq(&np->lock);
	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
		check_duplex(dev);

	return rc;
}
2016 static int netdev_close(struct net_device *dev)
2018 struct netdev_private *np = netdev_priv(dev);
2019 void __iomem *ioaddr = np->base;
2022 netif_stop_queue(dev);
2025 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
2026 dev->name, (int) readl(ioaddr + IntrStatus));
2027 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
2028 dev->name, np->cur_tx, np->dirty_tx,
2029 np->cur_rx, np->dirty_rx);
2032 /* Disable interrupts by clearing the interrupt mask. */
2033 writel(0, ioaddr + IntrEnable);
2035 /* Stop the chip's Tx and Rx processes. */
2036 writel(0, ioaddr + GenCtrl);
2037 readl(ioaddr + GenCtrl);
2040 printk(KERN_DEBUG" Tx ring at %#llx:\n",
2041 (long long) np->tx_ring_dma);
2042 for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
2043 printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
2044 i, le32_to_cpu(np->tx_ring[i].status),
2045 (long long) dma_to_cpu(np->tx_ring[i].addr),
2046 le32_to_cpu(np->tx_done_q[i].status));
2047 printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
2048 (long long) np->rx_ring_dma, np->rx_done_q);
2050 for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
2051 printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
2052 i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
2056 free_irq(dev->irq, dev);
2058 /* Free all the skbuffs in the Rx queue. */
2059 for (i = 0; i < RX_RING_SIZE; i++) {
2060 np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
2061 if (np->rx_info[i].skb != NULL) {
2062 pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
2063 dev_kfree_skb(np->rx_info[i].skb);
2065 np->rx_info[i].skb = NULL;
2066 np->rx_info[i].mapping = 0;
2068 for (i = 0; i < TX_RING_SIZE; i++) {
2069 struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		pci_unmap_single(np->pci_dev,
				 np->tx_info[i].mapping,
				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
2084 static void __devexit starfire_remove_one (struct pci_dev *pdev)
2086 struct net_device *dev = pci_get_drvdata(pdev);
2087 struct netdev_private *np = netdev_priv(dev);
2092 unregister_netdev(dev);
2095 pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
2098 /* XXX: add wakeup code -- requires firmware for MagicPacket */
2099 pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */
2100 pci_disable_device(pdev);
2103 pci_release_regions(pdev);
2105 pci_set_drvdata(pdev, NULL);
2106 free_netdev(dev); /* Will also free np!! */
static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
2112 .probe = starfire_init_one,
2113 .remove = __devexit_p(starfire_remove_one),
	.id_table	= starfire_pci_tbl,
};
2118 static int __init starfire_init (void)
2120 /* when a module, this is printed whether or not devices are found in probe */
2123 #ifdef HAVE_NETDEV_POLL
2124 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
	printk(KERN_INFO DRV_NAME ": polling (NAPI) disabled\n");
#endif

#ifndef ADDR_64BITS
	/* we can do this test only at run-time... sigh */
2132 if (sizeof(dma_addr_t) == sizeof(u64)) {
		printk("This driver has not been ported to this 64-bit architecture yet\n");
		return -ENODEV;
	}
2136 #endif /* not ADDR_64BITS */
2137 return pci_module_init (&starfire_driver);
2141 static void __exit starfire_cleanup (void)
2143 pci_unregister_driver (&starfire_driver);
2147 module_init(starfire_init);
2148 module_exit(starfire_cleanup);