1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
21 Version LK1.01a (jgarzik):
22 - Replace some MII-related magic numbers with constants
24 Version LK1.02 (D-Link):
25 - Add new board to PCI ID list
28 Version LK1.03 (D-Link):
29 - New Rx scheme, reduce Rx congestion
30 - Option to disable flow control
32 Version LK1.04 (D-Link):
34 - More support for ethtool.
37 - Remove unused/constant members from struct pci_id_info
38 (which then allows removal of 'drv_flags' from private struct)
40 - If no phy is found, fail to load that board (jgarzik)
41 - Always start phy id scan at id 1 to avoid problems (Donald Becker)
	- Autodetect where mii_preamble_required is needed,
43 default to not needed. (Donald Becker)
46 - Remove mii_preamble_required module parameter (Donald Becker)
47 - Add per-interface mii_preamble_required (setting is autodetected)
49 - Remove unnecessary cast from void pointer (jgarzik)
50 - Re-align comments in private struct (jgarzik)
52 Version LK1.04c (jgarzik):
53 - Support bitmapped message levels (NETIF_MSG_xxx), and the
54 two ethtool ioctls that get/set them
55 - Don't hand-code MII ethtool support, use standard API/lib
58 - Merge from Donald Becker's sundance.c: (Jason Lunz)
59 * proper support for variably-sized MTUs
60 * default to PIO, to fix chip bugs
61 - Add missing unregister_netdev (Jason Lunz)
62 - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
63 - Better rx buf size calculation (Donald Becker)
65 Version LK1.05 (D-Link):
66 - Fix DFE-580TX packet drop issue (for DL10050C)
69 Version LK1.06 (D-Link):
70 - Fix crash while unloading driver
	Version LK1.06b (D-Link):
73 - New tx scheme, adaptive tx_coalesce
75 Version LK1.07 (D-Link):
	- Fix Tx bugs on big-endian machines
	- Remove the unused max_interrupt_work module parameter; the new
	  NAPI-like Rx scheme doesn't need it.
	- Remove redundant get_stats() calls from intr_handler(); those
	  I/O accesses could affect performance on ARM-based systems
81 - Add Linux software VLAN support
83 Version LK1.08 (D-Link):
	- Fix custom MAC address bug
	  (the StationAddr register only accepts word writes)
87 Version LK1.09 (D-Link):
88 - Fix the flowctrl bug.
89 - Set Pause bit in MII ANAR if flow control enabled.
91 Version LK1.09a (ICPlus):
	- Add a delay when reading the contents of the EEPROM
96 #define DRV_NAME "sundance"
97 #define DRV_VERSION "1.01+LK1.09a"
98 #define DRV_RELDATE "10-Jul-2003"
101 /* The user-configurable values.
102 These may be modified when a driver module is loaded.*/
103 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
104 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
105 Typical is a 64 element hash table based on the Ethernet CRC. */
106 static int multicast_filter_limit = 32;
108 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
109 Setting to > 1518 effectively disables this feature.
110 This chip can receive into offset buffers, so the Alpha does not
111 need a copy-align. */
112 static int rx_copybreak;
113 static int flowctrl=1;
115 /* media[] specifies the media type the NIC operates at.
116 autosense Autosensing active media.
117 10mbps_hd 10Mbps half duplex.
118 10mbps_fd 10Mbps full duplex.
119 100mbps_hd 100Mbps half duplex.
120 100mbps_fd 100Mbps full duplex.
121 0 Autosensing active media.
122 1 10Mbps half duplex.
123 2 10Mbps full duplex.
124 3 100Mbps half duplex.
125 4 100Mbps full duplex.
128 static char *media[MAX_UNITS];
131 /* Operational parameters that are set at compile time. */
133 /* Keep the ring sizes a power of two for compile efficiency.
134 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
135 Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
138 Large receive rings merely waste memory. */
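/* As a concrete illustration of the power-of-two indexing used below
   (illustrative only, nothing chip-specific): with TX_RING_SIZE = 32 the
   expression

	entry = np->cur_tx % TX_RING_SIZE;

   is compiled into a simple mask, equivalent to

	entry = np->cur_tx & (TX_RING_SIZE - 1);

   because the index is unsigned and the ring size is a power of two. */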
139 #define TX_RING_SIZE 32
140 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
141 #define RX_RING_SIZE 64
#define TX_TOTAL_SIZE	(TX_RING_SIZE*sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE*sizeof(struct netdev_desc))
146 /* Operational parameters that usually are not changed. */
147 /* Time in jiffies before concluding the transmitter is hung. */
148 #define TX_TIMEOUT (4*HZ)
149 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
151 /* Include files, designed to support most kernel versions 2.0.0 and later. */
152 #include <linux/module.h>
153 #include <linux/kernel.h>
154 #include <linux/string.h>
155 #include <linux/timer.h>
156 #include <linux/errno.h>
157 #include <linux/ioport.h>
158 #include <linux/slab.h>
159 #include <linux/interrupt.h>
160 #include <linux/pci.h>
161 #include <linux/netdevice.h>
162 #include <linux/etherdevice.h>
163 #include <linux/skbuff.h>
164 #include <linux/init.h>
165 #include <linux/bitops.h>
166 #include <asm/uaccess.h>
167 #include <asm/processor.h> /* Processor type for cache alignment. */
169 #include <linux/delay.h>
170 #include <linux/spinlock.h>
171 #ifndef _COMPAT_WITH_OLD_KERNEL
172 #include <linux/crc32.h>
173 #include <linux/ethtool.h>
174 #include <linux/mii.h>
182 /* These identify the driver base version and may not be removed. */
183 static char version[] __devinitdata =
184 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
185 KERN_INFO " http://www.scyld.com/network/sundance.html\n";
187 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
188 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
189 MODULE_LICENSE("GPL");
191 module_param(debug, int, 0);
192 module_param(rx_copybreak, int, 0);
193 module_param_array(media, charp, NULL, 0);
194 module_param(flowctrl, int, 0);
195 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
196 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
197 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
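/* Example invocation (hypothetical values, shown only to illustrate the
   module parameters declared above):

	modprobe sundance debug=3 flowctrl=0 media=100mbps_fd,autosense

   This increases log verbosity, disables flow control, forces the first
   card to 100Mbps full duplex and lets the second card autosense. */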
202 I. Board Compatibility
204 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
206 II. Board-specific settings
208 III. Driver operation
212 This driver uses two statically allocated fixed-size descriptor lists
213 formed into rings by a branch from the final descriptor to the beginning of
214 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
215 Some chips explicitly use only 2^N sized rings, while others use a
216 'next descriptor' pointer that the driver forms into rings.
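A minimal sketch of how the ring is chained (this mirrors init_ring()
below and is purely illustrative):

	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));

The last descriptor's next_desc wraps back to the first entry, closing
the ring.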
218 IIIb/c. Transmit/Receive Structure
220 This driver uses a zero-copy receive and transmit scheme.
221 The driver allocates full frame size skbuffs for the Rx ring buffers at
222 open() time and passes the skb->data field to the chip as receive data
223 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
224 a fresh skbuff is allocated and the frame is copied to the new skbuff.
225 When the incoming frame is larger, the skbuff is passed directly up the
226 protocol stack. Buffers consumed this way are replaced by newly allocated
227 skbuffs in a later phase of receives.
229 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
230 using a full-sized skbuff for small frames vs. the copying costs of larger
231 frames. New boards are typically used in generously configured machines
232 and the underfilled buffers have negligible impact compared to the benefit of
233 a single allocation size, so the default value of zero results in never
234 copying packets. When copying is done, the cost is usually mitigated by using
235 a combined copy/checksum routine. Copying also preloads the cache, which is
236 most useful with small frames.
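A rough sketch of the copybreak decision described above (the real code
lives in rx_poll(); this fragment is only an illustration):

	if (pkt_len < rx_copybreak
	    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16 byte align the IP header */
		/* ... copy the small frame into the fresh skbuff ... */
	} else {
		/* ... hand the original ring skbuff up the stack ... */
	}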
238 A subtle aspect of the operation is that the IP header at offset 14 in an
239 ethernet frame isn't longword aligned for further processing.
240 Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
244 IIId. Synchronization
246 The driver runs as two independent, single-threaded flows of control. One
247 is the send-packet routine, which enforces single-threaded use by the
248 dev->tbusy flag. The other thread is the interrupt handler, which is single
249 threaded by the hardware and interrupt handling software.
251 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
252 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
254 the 'lp->tx_full' flag.
256 The interrupt handler has exclusive control over the Rx ring and records stats
257 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
258 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
259 clears both the tx_full and tbusy flags.
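In current kernels this handshake is expressed through
netif_stop_queue()/netif_wake_queue(); roughly (illustrative only, it
mirrors start_tx() and intr_handler() below):

	/* Tx path: stop the queue when the ring is nearly full. */
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
		netif_stop_queue(dev);

	/* Interrupt path: wake the queue once enough slots are reaped. */
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);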
265 The Sundance ST201 datasheet, preliminary version.
266 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
267 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
273 /* Work-around for Kendin chip bugs. */
274 #ifndef CONFIG_SUNDANCE_MMIO
278 static struct pci_device_id sundance_pci_tbl[] = {
279 {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
280 {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
281 {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
282 {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
283 {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
284 {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
287 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
296 static struct pci_id_info pci_id_tbl[] = {
297 {"D-Link DFE-550TX FAST Ethernet Adapter"},
298 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
299 {"D-Link DFE-580TX 4 port Server Adapter"},
300 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
301 {"D-Link DL10050-based FAST Ethernet Adapter"},
302 {"Sundance Technology Alta"},
303 {NULL,}, /* 0 terminated list. */
/* This driver was written to use PCI memory space; however, x86-oriented
   hardware often uses I/O space accesses. */
309 /* Offsets to the device registers.
310 Unlike software-only systems, device drivers interact with complex hardware.
It's not useful to define symbolic names for every register bit in the
device.  Such names can only partially document the semantics and would
make the driver longer and more difficult to read.
314 In general, only the important configuration values or bits changed
315 multiple times should be defined symbolically.
320 TxDMABurstThresh = 0x08,
321 TxDMAUrgentThresh = 0x09,
322 TxDMAPollPeriod = 0x0a,
327 RxDMABurstThresh = 0x14,
328 RxDMAUrgentThresh = 0x15,
329 RxDMAPollPeriod = 0x16,
334 TxStartThresh = 0x3c,
335 RxEarlyThresh = 0x3e,
350 MulticastFilter0 = 0x60,
351 MulticastFilter1 = 0x64,
358 StatsCarrierError = 0x74,
359 StatsLateColl = 0x75,
360 StatsMultiColl = 0x76,
364 StatsTxXSDefer = 0x7a,
370 /* Aliased and bogus values! */
373 enum ASICCtrl_HiWord_bit {
374 GlobalReset = 0x0001,
379 NetworkReset = 0x0020,
384 /* Bits in the interrupt status/mask registers. */
385 enum intr_status_bits {
386 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
387 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
389 StatsMax=0x0080, LinkChange=0x0100,
390 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
393 /* Bits in the RxMode register. */
395 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
396 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
398 /* Bits in MACCtrl. */
399 enum mac_ctrl0_bits {
400 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
401 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
403 enum mac_ctrl1_bits {
404 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
405 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
406 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
409 /* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;
	u32 status;
	struct desc_frag { u32 addr, length; } frag[1];
};
418 /* Bits in netdev_desc.status */
419 enum desc_status_bits {
421 DescEndPacket=0x4000,
425 DescIntrOnDMADone=0x80000000,
426 DisableAlign = 0x00000001,
429 #define PRIV_ALIGN 15 /* Required alignment mask */
430 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
431 within the structure. */
433 struct netdev_private {
434 /* Descriptor rings first for alignment. */
435 struct netdev_desc *rx_ring;
436 struct netdev_desc *tx_ring;
437 struct sk_buff* rx_skbuff[RX_RING_SIZE];
438 struct sk_buff* tx_skbuff[TX_RING_SIZE];
439 dma_addr_t tx_ring_dma;
440 dma_addr_t rx_ring_dma;
441 struct net_device_stats stats;
442 struct timer_list timer; /* Media monitoring timer. */
443 /* Frequently used values: keep some adjacent for cache effect. */
445 spinlock_t rx_lock; /* Group with Tx control cache line. */
448 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
449 unsigned int rx_buf_sz; /* Based on MTU+slack. */
450 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
451 unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
453 unsigned int flowctrl:1;
454 unsigned int default_port:4; /* Last dev->if_port value. */
455 unsigned int an_enable:1;
457 struct tasklet_struct rx_tasklet;
458 struct tasklet_struct tx_tasklet;
461 /* Multicast and receive mode. */
462 spinlock_t mcastlock; /* SMP lock multicast updates. */
464 /* MII transceiver section. */
465 struct mii_if_info mii_if;
466 int mii_preamble_required;
467 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
468 struct pci_dev *pci_dev;
470 unsigned char pci_rev_id;
473 /* The station address location in the EEPROM. */
474 #define EEPROM_SA_OFFSET 0x10
475 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
476 IntrDrvRqst | IntrTxDone | StatsMax | \
479 static int change_mtu(struct net_device *dev, int new_mtu);
480 static int eeprom_read(void __iomem *ioaddr, int location);
481 static int mdio_read(struct net_device *dev, int phy_id, int location);
482 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
483 static int netdev_open(struct net_device *dev);
484 static void check_duplex(struct net_device *dev);
485 static void netdev_timer(unsigned long data);
486 static void tx_timeout(struct net_device *dev);
487 static void init_ring(struct net_device *dev);
488 static int start_tx(struct sk_buff *skb, struct net_device *dev);
489 static int reset_tx (struct net_device *dev);
490 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
491 static void rx_poll(unsigned long data);
492 static void tx_poll(unsigned long data);
493 static void refill_rx (struct net_device *dev);
494 static void netdev_error(struct net_device *dev, int intr_status);
496 static void set_rx_mode(struct net_device *dev);
497 static int __set_mac_addr(struct net_device *dev);
498 static struct net_device_stats *get_stats(struct net_device *dev);
499 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500 static int netdev_close(struct net_device *dev);
501 static struct ethtool_ops ethtool_ops;
503 static int __devinit sundance_probe1 (struct pci_dev *pdev,
504 const struct pci_device_id *ent)
506 struct net_device *dev;
507 struct netdev_private *np;
509 int chip_idx = ent->driver_data;
512 void __iomem *ioaddr;
523 /* when built into the kernel, we only print version if device is found */
525 static int printed_version;
526 if (!printed_version++)
530 if (pci_enable_device(pdev))
532 pci_set_master(pdev);
536 dev = alloc_etherdev(sizeof(*np));
539 SET_MODULE_OWNER(dev);
540 SET_NETDEV_DEV(dev, &pdev->dev);
542 if (pci_request_regions(pdev, DRV_NAME))
545 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
549 for (i = 0; i < 3; i++)
550 ((u16 *)dev->dev_addr)[i] =
551 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
552 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
554 dev->base_addr = (unsigned long)ioaddr;
557 np = netdev_priv(dev);
560 np->chip_id = chip_idx;
561 np->msg_enable = (1 << debug) - 1;
562 spin_lock_init(&np->lock);
563 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
564 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
566 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
568 goto err_out_cleardev;
569 np->tx_ring = (struct netdev_desc *)ring_space;
570 np->tx_ring_dma = ring_dma;
572 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
574 goto err_out_unmap_tx;
575 np->rx_ring = (struct netdev_desc *)ring_space;
576 np->rx_ring_dma = ring_dma;
578 np->mii_if.dev = dev;
579 np->mii_if.mdio_read = mdio_read;
580 np->mii_if.mdio_write = mdio_write;
581 np->mii_if.phy_id_mask = 0x1f;
582 np->mii_if.reg_num_mask = 0x1f;
584 /* The chip-specific entries in the device structure. */
585 dev->open = &netdev_open;
586 dev->hard_start_xmit = &start_tx;
587 dev->stop = &netdev_close;
588 dev->get_stats = &get_stats;
589 dev->set_multicast_list = &set_rx_mode;
590 dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
592 dev->tx_timeout = &tx_timeout;
593 dev->watchdog_timeo = TX_TIMEOUT;
594 dev->change_mtu = &change_mtu;
595 pci_set_drvdata(pdev, dev);
597 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
599 i = register_netdev(dev);
601 goto err_out_unmap_rx;
603 printk(KERN_INFO "%s: %s at %p, ",
604 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
605 for (i = 0; i < 5; i++)
606 printk("%2.2x:", dev->dev_addr[i]);
607 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
610 int phy, phy_idx = 0;
611 np->phys[0] = 1; /* Default setting */
612 np->mii_preamble_required++;
613 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
614 int mii_status = mdio_read(dev, phy, MII_BMSR);
615 if (mii_status != 0xffff && mii_status != 0x0000) {
616 np->phys[phy_idx++] = phy;
617 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
618 if ((mii_status & 0x0040) == 0)
619 np->mii_preamble_required++;
620 printk(KERN_INFO "%s: MII PHY found at address %d, status "
621 "0x%4.4x advertising %4.4x.\n",
622 dev->name, phy, mii_status, np->mii_if.advertising);
625 np->mii_preamble_required--;
628 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
629 dev->name, ioread32(ioaddr + ASICCtrl));
630 goto err_out_unregister;
633 np->mii_if.phy_id = np->phys[0];
636 /* Parse override configuration */
638 if (card_idx < MAX_UNITS) {
639 if (media[card_idx] != NULL) {
641 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
642 strcmp (media[card_idx], "4") == 0) {
644 np->mii_if.full_duplex = 1;
645 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
646 || strcmp (media[card_idx], "3") == 0) {
648 np->mii_if.full_duplex = 0;
649 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
650 strcmp (media[card_idx], "2") == 0) {
652 np->mii_if.full_duplex = 1;
653 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
654 strcmp (media[card_idx], "1") == 0) {
656 np->mii_if.full_duplex = 0;
666 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
667 /* Default 100Mbps Full */
670 np->mii_if.full_duplex = 1;
675 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	/* If flow control is enabled, we need to advertise it. */
679 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
680 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
681 /* Force media type */
682 if (!np->an_enable) {
684 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
685 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
686 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
687 printk (KERN_INFO "Override speed=%d, %s duplex\n",
688 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
692 /* Perhaps move the reset here? */
693 /* Reset the chip to erase previous misconfiguration. */
694 if (netif_msg_hw(np))
695 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
696 iowrite16(0x007f, ioaddr + ASICCtrl + 2);
697 if (netif_msg_hw(np))
698 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
704 unregister_netdev(dev);
706 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
708 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
710 pci_set_drvdata(pdev, NULL);
711 pci_iounmap(pdev, ioaddr);
713 pci_release_regions(pdev);
719 static int change_mtu(struct net_device *dev, int new_mtu)
721 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
723 if (netif_running(dev))
729 #define eeprom_delay(ee_addr) ioread32(ee_addr)
730 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
731 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
733 int boguscnt = 10000; /* Typical 1900 ticks. */
734 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
736 eeprom_delay(ioaddr + EECtrl);
737 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
738 return ioread16(ioaddr + EEData);
740 } while (--boguscnt > 0);
744 /* MII transceiver control section.
745 Read and write the MII registers using software-generated serial
746 MDIO protocol. See the MII specifications or DP83840A data sheet
   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
751 #define mdio_delay() ioread8(mdio_addr)
754 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
756 #define MDIO_EnbIn (0)
757 #define MDIO_WRITE0 (MDIO_EnbOutput)
758 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
760 /* Generate the preamble required for initial synchronization and
761 a few older transceivers. */
762 static void mdio_sync(void __iomem *mdio_addr)
766 /* Establish sync by sending at least 32 logic ones. */
767 while (--bits >= 0) {
768 iowrite8(MDIO_WRITE1, mdio_addr);
770 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
775 static int mdio_read(struct net_device *dev, int phy_id, int location)
777 struct netdev_private *np = netdev_priv(dev);
778 void __iomem *mdio_addr = np->base + MIICtrl;
779 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
782 if (np->mii_preamble_required)
783 mdio_sync(mdio_addr);
785 /* Shift the read command bits out. */
786 for (i = 15; i >= 0; i--) {
787 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
789 iowrite8(dataval, mdio_addr);
791 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
794 /* Read the two transition, 16 data, and wire-idle bits. */
795 for (i = 19; i > 0; i--) {
796 iowrite8(MDIO_EnbIn, mdio_addr);
798 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
799 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
802 return (retval>>1) & 0xffff;
805 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
807 struct netdev_private *np = netdev_priv(dev);
808 void __iomem *mdio_addr = np->base + MIICtrl;
809 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
812 if (np->mii_preamble_required)
813 mdio_sync(mdio_addr);
815 /* Shift the command bits out. */
816 for (i = 31; i >= 0; i--) {
817 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
819 iowrite8(dataval, mdio_addr);
821 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
824 /* Clear out extra bits. */
825 for (i = 2; i > 0; i--) {
826 iowrite8(MDIO_EnbIn, mdio_addr);
828 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
834 static int netdev_open(struct net_device *dev)
836 struct netdev_private *np = netdev_priv(dev);
837 void __iomem *ioaddr = np->base;
840 /* Do we need to reset the chip??? */
842 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
846 if (netif_msg_ifup(np))
847 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
848 dev->name, dev->irq);
851 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
852 /* The Tx list pointer is written as packets are queued. */
854 /* Initialize other registers. */
856 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
857 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
859 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
862 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
864 /* Configure the PCI bus bursts and FIFO thresholds. */
866 if (dev->if_port == 0)
867 dev->if_port = np->default_port;
869 spin_lock_init(&np->mcastlock);
872 iowrite16(0, ioaddr + IntrEnable);
873 iowrite16(0, ioaddr + DownCounter);
874 /* Set the chip to poll every N*320nsec. */
875 iowrite8(100, ioaddr + RxDMAPollPeriod);
876 iowrite8(127, ioaddr + TxDMAPollPeriod);
877 /* Fix DFE-580TX packet drop issue */
878 if (np->pci_rev_id >= 0x14)
879 iowrite8(0x01, ioaddr + DebugCtrl1);
880 netif_start_queue(dev);
882 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
884 if (netif_msg_ifup(np))
885 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
886 "MAC Control %x, %4.4x %4.4x.\n",
887 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
888 ioread32(ioaddr + MACCtrl0),
889 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
891 /* Set the timer to check for link beat. */
892 init_timer(&np->timer);
893 np->timer.expires = jiffies + 3*HZ;
894 np->timer.data = (unsigned long)dev;
895 np->timer.function = &netdev_timer; /* timer handler */
896 add_timer(&np->timer);
898 /* Enable interrupts by setting the interrupt mask. */
899 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
904 static void check_duplex(struct net_device *dev)
906 struct netdev_private *np = netdev_priv(dev);
907 void __iomem *ioaddr = np->base;
908 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
909 int negotiated = mii_lpa & np->mii_if.advertising;
913 if (!np->an_enable || mii_lpa == 0xffff) {
914 if (np->mii_if.full_duplex)
915 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
920 /* Autonegotiation */
921 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
922 if (np->mii_if.full_duplex != duplex) {
923 np->mii_if.full_duplex = duplex;
924 if (netif_msg_link(np))
925 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
926 "negotiated capability %4.4x.\n", dev->name,
927 duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
932 static void netdev_timer(unsigned long data)
934 struct net_device *dev = (struct net_device *)data;
935 struct netdev_private *np = netdev_priv(dev);
936 void __iomem *ioaddr = np->base;
937 int next_tick = 10*HZ;
939 if (netif_msg_timer(np)) {
940 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
942 dev->name, ioread16(ioaddr + IntrEnable),
943 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
946 np->timer.expires = jiffies + next_tick;
947 add_timer(&np->timer);
950 static void tx_timeout(struct net_device *dev)
952 struct netdev_private *np = netdev_priv(dev);
953 void __iomem *ioaddr = np->base;
956 netif_stop_queue(dev);
957 tasklet_disable(&np->tx_tasklet);
958 iowrite16(0, ioaddr + IntrEnable);
959 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
961 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
962 ioread8(ioaddr + TxFrameId));
966 for (i=0; i<TX_RING_SIZE; i++) {
967 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
968 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
969 le32_to_cpu(np->tx_ring[i].next_desc),
970 le32_to_cpu(np->tx_ring[i].status),
971 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
972 le32_to_cpu(np->tx_ring[i].frag[0].addr),
973 le32_to_cpu(np->tx_ring[i].frag[0].length));
975 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
976 ioread32(np->base + TxListPtr),
977 netif_queue_stopped(dev));
978 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
979 np->cur_tx, np->cur_tx % TX_RING_SIZE,
980 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
981 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
982 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
984 spin_lock_irqsave(&np->lock, flag);
	/* Stop and restart the chip's Tx processes. */
988 spin_unlock_irqrestore(&np->lock, flag);
992 dev->trans_start = jiffies;
993 np->stats.tx_errors++;
994 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
995 netif_wake_queue(dev);
997 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
998 tasklet_enable(&np->tx_tasklet);
1002 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1003 static void init_ring(struct net_device *dev)
1005 struct netdev_private *np = netdev_priv(dev);
1008 np->cur_rx = np->cur_tx = 0;
1009 np->dirty_rx = np->dirty_tx = 0;
1012 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1014 /* Initialize all Rx descriptors. */
1015 for (i = 0; i < RX_RING_SIZE; i++) {
1016 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1017 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1018 np->rx_ring[i].status = 0;
1019 np->rx_ring[i].frag[0].length = 0;
1020 np->rx_skbuff[i] = NULL;
1023 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1024 for (i = 0; i < RX_RING_SIZE; i++) {
1025 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1026 np->rx_skbuff[i] = skb;
1029 skb->dev = dev; /* Mark as being used by this device. */
1030 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1031 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1032 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1033 PCI_DMA_FROMDEVICE));
1034 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1036 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1038 for (i = 0; i < TX_RING_SIZE; i++) {
1039 np->tx_skbuff[i] = NULL;
1040 np->tx_ring[i].status = 0;
1045 static void tx_poll (unsigned long data)
1047 struct net_device *dev = (struct net_device *)data;
1048 struct netdev_private *np = netdev_priv(dev);
1049 unsigned head = np->cur_task % TX_RING_SIZE;
1050 struct netdev_desc *txdesc =
1051 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1053 /* Chain the next pointer */
1054 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1055 int entry = np->cur_task % TX_RING_SIZE;
1056 txdesc = &np->tx_ring[entry];
1058 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1059 entry*sizeof(struct netdev_desc));
1061 np->last_tx = txdesc;
1063 /* Indicate the latest descriptor of tx ring */
1064 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1066 if (ioread32 (np->base + TxListPtr) == 0)
1067 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1068 np->base + TxListPtr);
1073 start_tx (struct sk_buff *skb, struct net_device *dev)
1075 struct netdev_private *np = netdev_priv(dev);
1076 struct netdev_desc *txdesc;
1079 /* Calculate the next Tx descriptor entry. */
1080 entry = np->cur_tx % TX_RING_SIZE;
1081 np->tx_skbuff[entry] = skb;
1082 txdesc = &np->tx_ring[entry];
1084 txdesc->next_desc = 0;
1085 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1086 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1089 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1091 /* Increment cur_tx before tasklet_schedule() */
1094 /* Schedule a tx_poll() task */
1095 tasklet_schedule(&np->tx_tasklet);
1097 /* On some architectures: explicitly flush cache lines here. */
1098 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1099 && !netif_queue_stopped(dev)) {
1102 netif_stop_queue (dev);
1104 dev->trans_start = jiffies;
1105 if (netif_msg_tx_queued(np)) {
1107 "%s: Transmit frame #%d queued in slot %d.\n",
1108 dev->name, np->cur_tx, entry);
/* Reset hardware Tx and free all Tx buffers */
1115 reset_tx (struct net_device *dev)
1117 struct netdev_private *np = netdev_priv(dev);
1118 void __iomem *ioaddr = np->base;
1119 struct sk_buff *skb;
1121 int irq = in_interrupt();
	/* Reset Tx logic; the TxListPtr will be cleared */
1124 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1125 iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
1126 ioaddr + ASICCtrl + 2);
1127 for (i=50; i > 0; i--) {
1128 if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
	/* Free all Tx skbuffs */
1133 for (i = 0; i < TX_RING_SIZE; i++) {
1134 skb = np->tx_skbuff[i];
1136 pci_unmap_single(np->pci_dev,
1137 np->tx_ring[i].frag[0].addr, skb->len,
1140 dev_kfree_skb_irq (skb);
1142 dev_kfree_skb (skb);
1143 np->tx_skbuff[i] = NULL;
1144 np->stats.tx_dropped++;
1147 np->cur_tx = np->dirty_tx = 0;
1149 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
/* The interrupt handler cleans up after the Tx thread,
   and schedules the Rx thread's work */
1155 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1157 struct net_device *dev = (struct net_device *)dev_instance;
1158 struct netdev_private *np = netdev_priv(dev);
1159 void __iomem *ioaddr = np->base;
1167 int intr_status = ioread16(ioaddr + IntrStatus);
1168 iowrite16(intr_status, ioaddr + IntrStatus);
1170 if (netif_msg_intr(np))
1171 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1172 dev->name, intr_status);
1174 if (!(intr_status & DEFAULT_INTR))
1179 if (intr_status & (IntrRxDMADone)) {
1180 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1181 ioaddr + IntrEnable);
1183 np->budget = RX_BUDGET;
1184 tasklet_schedule(&np->rx_tasklet);
1186 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1187 tx_status = ioread16 (ioaddr + TxStatus);
1188 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1189 if (netif_msg_tx_done(np))
1191 ("%s: Transmit status is %2.2x.\n",
1192 dev->name, tx_status);
1193 if (tx_status & 0x1e) {
1194 np->stats.tx_errors++;
1195 if (tx_status & 0x10)
1196 np->stats.tx_fifo_errors++;
1197 if (tx_status & 0x08)
1198 np->stats.collisions++;
1199 if (tx_status & 0x02)
1200 np->stats.tx_window_errors++;
				/* This reset has not been verified! */
1202 if (tx_status & 0x10) { /* Reset the Tx. */
1203 np->stats.tx_fifo_errors++;
1204 spin_lock(&np->lock);
1206 spin_unlock(&np->lock);
1208 if (tx_status & 0x1e) /* Restart the Tx. */
1209 iowrite16 (TxEnable,
1212 /* Yup, this is a documentation bug. It cost me *hours*. */
1213 iowrite16 (0, ioaddr + TxStatus);
1215 iowrite32(5000, ioaddr + DownCounter);
1218 tx_status = ioread16 (ioaddr + TxStatus);
1220 hw_frame_id = (tx_status >> 8) & 0xff;
1222 hw_frame_id = ioread8(ioaddr + TxFrameId);
1225 if (np->pci_rev_id >= 0x14) {
1226 spin_lock(&np->lock);
1227 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1228 int entry = np->dirty_tx % TX_RING_SIZE;
1229 struct sk_buff *skb;
1231 sw_frame_id = (le32_to_cpu(
1232 np->tx_ring[entry].status) >> 2) & 0xff;
1233 if (sw_frame_id == hw_frame_id &&
1234 !(le32_to_cpu(np->tx_ring[entry].status)
1237 if (sw_frame_id == (hw_frame_id + 1) %
1240 skb = np->tx_skbuff[entry];
1241 /* Free the original skb. */
1242 pci_unmap_single(np->pci_dev,
1243 np->tx_ring[entry].frag[0].addr,
1244 skb->len, PCI_DMA_TODEVICE);
1245 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1246 np->tx_skbuff[entry] = NULL;
1247 np->tx_ring[entry].frag[0].addr = 0;
1248 np->tx_ring[entry].frag[0].length = 0;
1250 spin_unlock(&np->lock);
1252 spin_lock(&np->lock);
1253 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1254 int entry = np->dirty_tx % TX_RING_SIZE;
1255 struct sk_buff *skb;
1256 if (!(le32_to_cpu(np->tx_ring[entry].status)
1259 skb = np->tx_skbuff[entry];
1260 /* Free the original skb. */
1261 pci_unmap_single(np->pci_dev,
1262 np->tx_ring[entry].frag[0].addr,
1263 skb->len, PCI_DMA_TODEVICE);
1264 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1265 np->tx_skbuff[entry] = NULL;
1266 np->tx_ring[entry].frag[0].addr = 0;
1267 np->tx_ring[entry].frag[0].length = 0;
1269 spin_unlock(&np->lock);
1272 if (netif_queue_stopped(dev) &&
1273 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1274 /* The ring is no longer full, clear busy flag. */
1275 netif_wake_queue (dev);
1277 /* Abnormal error summary/uncommon events handlers. */
1278 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1279 netdev_error(dev, intr_status);
1281 if (netif_msg_intr(np))
1282 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1283 dev->name, ioread16(ioaddr + IntrStatus));
1284 return IRQ_RETVAL(handled);
1287 static void rx_poll(unsigned long data)
1289 struct net_device *dev = (struct net_device *)data;
1290 struct netdev_private *np = netdev_priv(dev);
1291 int entry = np->cur_rx % RX_RING_SIZE;
1292 int boguscnt = np->budget;
1293 void __iomem *ioaddr = np->base;
1296 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1298 struct netdev_desc *desc = &(np->rx_ring[entry]);
1299 u32 frame_status = le32_to_cpu(desc->status);
1302 if (--boguscnt < 0) {
1305 if (!(frame_status & DescOwn))
1307 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1308 if (netif_msg_rx_status(np))
1309 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1311 if (frame_status & 0x001f4000) {
			/* There was an error. */
1313 if (netif_msg_rx_err(np))
1314 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1316 np->stats.rx_errors++;
1317 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1318 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1319 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1320 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1321 if (frame_status & 0x00100000) {
1322 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1324 dev->name, frame_status);
1327 struct sk_buff *skb;
1328 #ifndef final_version
1329 if (netif_msg_rx_status(np))
1330 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1331 ", bogus_cnt %d.\n",
1334 /* Check if the packet is long enough to accept without copying
1335 to a minimally-sized skbuff. */
1336 if (pkt_len < rx_copybreak
1337 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1339 skb_reserve(skb, 2); /* 16 byte align the IP header */
1340 pci_dma_sync_single_for_cpu(np->pci_dev,
1343 PCI_DMA_FROMDEVICE);
1345 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1346 pci_dma_sync_single_for_device(np->pci_dev,
1349 PCI_DMA_FROMDEVICE);
1350 skb_put(skb, pkt_len);
1352 pci_unmap_single(np->pci_dev,
1355 PCI_DMA_FROMDEVICE);
1356 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1357 np->rx_skbuff[entry] = NULL;
1359 skb->protocol = eth_type_trans(skb, dev);
1360 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1362 dev->last_rx = jiffies;
1364 entry = (entry + 1) % RX_RING_SIZE;
1369 np->budget -= received;
1370 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1378 np->budget -= received;
1379 if (np->budget <= 0)
1380 np->budget = RX_BUDGET;
1381 tasklet_schedule(&np->rx_tasklet);
1385 static void refill_rx (struct net_device *dev)
1387 struct netdev_private *np = netdev_priv(dev);
1391 /* Refill the Rx ring buffers. */
1392 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1393 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1394 struct sk_buff *skb;
1395 entry = np->dirty_rx % RX_RING_SIZE;
1396 if (np->rx_skbuff[entry] == NULL) {
1397 skb = dev_alloc_skb(np->rx_buf_sz);
1398 np->rx_skbuff[entry] = skb;
1400 break; /* Better luck next round. */
1401 skb->dev = dev; /* Mark as being used by this device. */
1402 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1403 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1404 pci_map_single(np->pci_dev, skb->data,
1405 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1407 /* Perhaps we need not reset this field. */
1408 np->rx_ring[entry].frag[0].length =
1409 cpu_to_le32(np->rx_buf_sz | LastFrag);
1410 np->rx_ring[entry].status = 0;
1415 static void netdev_error(struct net_device *dev, int intr_status)
1417 struct netdev_private *np = netdev_priv(dev);
1418 void __iomem *ioaddr = np->base;
1419 u16 mii_ctl, mii_advertise, mii_lpa;
1422 if (intr_status & LinkChange) {
1423 if (np->an_enable) {
1424 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1425 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1426 mii_advertise &= mii_lpa;
1427 printk (KERN_INFO "%s: Link changed: ", dev->name);
1428 if (mii_advertise & ADVERTISE_100FULL) {
1430 printk ("100Mbps, full duplex\n");
1431 } else if (mii_advertise & ADVERTISE_100HALF) {
1433 printk ("100Mbps, half duplex\n");
1434 } else if (mii_advertise & ADVERTISE_10FULL) {
1436 printk ("10Mbps, full duplex\n");
1437 } else if (mii_advertise & ADVERTISE_10HALF) {
1439 printk ("10Mbps, half duplex\n");
1444 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1445 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1447 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1449 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1453 if (np->flowctrl && np->mii_if.full_duplex) {
1454 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1455 ioaddr + MulticastFilter1+2);
1456 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1460 if (intr_status & StatsMax) {
1463 if (intr_status & IntrPCIErr) {
1464 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1465 dev->name, intr_status);
1466 /* We must do a global reset of DMA to continue. */
1470 static struct net_device_stats *get_stats(struct net_device *dev)
1472 struct netdev_private *np = netdev_priv(dev);
1473 void __iomem *ioaddr = np->base;
	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames that were silently dropped. */
1480 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1481 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1482 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1483 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1484 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1485 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1486 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1487 ioread8(ioaddr + StatsTxDefer);
1488 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1489 ioread8(ioaddr + i);
1490 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1491 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1492 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1493 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1498 static void set_rx_mode(struct net_device *dev)
1500 struct netdev_private *np = netdev_priv(dev);
1501 void __iomem *ioaddr = np->base;
1502 u16 mc_filter[4]; /* Multicast hash filter */
1506 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1507 /* Unconditionally log net taps. */
1508 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1509 memset(mc_filter, 0xff, sizeof(mc_filter));
1510 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1511 } else if ((dev->mc_count > multicast_filter_limit)
1512 || (dev->flags & IFF_ALLMULTI)) {
1513 /* Too many to match, or accept all multicasts. */
1514 memset(mc_filter, 0xff, sizeof(mc_filter));
1515 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1516 } else if (dev->mc_count) {
1517 struct dev_mc_list *mclist;
1521 memset (mc_filter, 0, sizeof (mc_filter));
1522 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1523 i++, mclist = mclist->next) {
1524 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1525 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1526 if (crc & 0x80000000) index |= 1 << bit;
1527 mc_filter[index/16] |= (1 << (index % 16));
1529 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1531 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1534 if (np->mii_if.full_duplex && np->flowctrl)
1535 mc_filter[3] |= 0x0200;
1537 for (i = 0; i < 4; i++)
1538 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1539 iowrite8(rx_mode, ioaddr + RxMode);
1542 static int __set_mac_addr(struct net_device *dev)
1544 struct netdev_private *np = netdev_priv(dev);
1547 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1548 iowrite16(addr16, np->base + StationAddr);
1549 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1550 iowrite16(addr16, np->base + StationAddr+2);
1551 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1552 iowrite16(addr16, np->base + StationAddr+4);
1556 static int check_if_running(struct net_device *dev)
1558 if (!netif_running(dev))
1563 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1565 struct netdev_private *np = netdev_priv(dev);
1566 strcpy(info->driver, DRV_NAME);
1567 strcpy(info->version, DRV_VERSION);
1568 strcpy(info->bus_info, pci_name(np->pci_dev));
1571 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1573 struct netdev_private *np = netdev_priv(dev);
1574 spin_lock_irq(&np->lock);
1575 mii_ethtool_gset(&np->mii_if, ecmd);
1576 spin_unlock_irq(&np->lock);
1580 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1582 struct netdev_private *np = netdev_priv(dev);
1584 spin_lock_irq(&np->lock);
1585 res = mii_ethtool_sset(&np->mii_if, ecmd);
1586 spin_unlock_irq(&np->lock);
1590 static int nway_reset(struct net_device *dev)
1592 struct netdev_private *np = netdev_priv(dev);
1593 return mii_nway_restart(&np->mii_if);
1596 static u32 get_link(struct net_device *dev)
1598 struct netdev_private *np = netdev_priv(dev);
1599 return mii_link_ok(&np->mii_if);
1602 static u32 get_msglevel(struct net_device *dev)
1604 struct netdev_private *np = netdev_priv(dev);
1605 return np->msg_enable;
1608 static void set_msglevel(struct net_device *dev, u32 val)
1610 struct netdev_private *np = netdev_priv(dev);
1611 np->msg_enable = val;
1614 static struct ethtool_ops ethtool_ops = {
1615 .begin = check_if_running,
1616 .get_drvinfo = get_drvinfo,
1617 .get_settings = get_settings,
1618 .set_settings = set_settings,
1619 .nway_reset = nway_reset,
1620 .get_link = get_link,
1621 .get_msglevel = get_msglevel,
1622 .set_msglevel = set_msglevel,
1623 .get_perm_addr = ethtool_op_get_perm_addr,
1626 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1628 struct netdev_private *np = netdev_priv(dev);
1629 void __iomem *ioaddr = np->base;
1633 if (!netif_running(dev))
1636 spin_lock_irq(&np->lock);
1637 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1638 spin_unlock_irq(&np->lock);
1640 case SIOCDEVPRIVATE:
1641 for (i=0; i<TX_RING_SIZE; i++) {
1642 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
1643 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1644 le32_to_cpu(np->tx_ring[i].next_desc),
1645 le32_to_cpu(np->tx_ring[i].status),
1646 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1648 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1649 le32_to_cpu(np->tx_ring[i].frag[0].length));
1651 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1652 ioread32(np->base + TxListPtr),
1653 netif_queue_stopped(dev));
1654 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1655 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1656 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1657 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1658 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1659 printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
1667 static int netdev_close(struct net_device *dev)
1669 struct netdev_private *np = netdev_priv(dev);
1670 void __iomem *ioaddr = np->base;
1671 struct sk_buff *skb;
1674 netif_stop_queue(dev);
1676 if (netif_msg_ifdown(np)) {
1677 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1678 "Rx %4.4x Int %2.2x.\n",
1679 dev->name, ioread8(ioaddr + TxStatus),
1680 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1681 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1682 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1685 /* Disable interrupts by clearing the interrupt mask. */
1686 iowrite16(0x0000, ioaddr + IntrEnable);
1688 /* Stop the chip's Tx and Rx processes. */
1689 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1691 /* Wait and kill tasklet */
1692 tasklet_kill(&np->rx_tasklet);
1693 tasklet_kill(&np->tx_tasklet);
1696 if (netif_msg_hw(np)) {
1697 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1698 (int)(np->tx_ring_dma));
1699 for (i = 0; i < TX_RING_SIZE; i++)
1700 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1701 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1702 np->tx_ring[i].frag[0].length);
1703 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1704 (int)(np->rx_ring_dma));
1705 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1706 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1707 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1708 np->rx_ring[i].frag[0].length);
1711 #endif /* __i386__ debugging only */
1713 free_irq(dev->irq, dev);
1715 del_timer_sync(&np->timer);
1717 /* Free all the skbuffs in the Rx queue. */
1718 for (i = 0; i < RX_RING_SIZE; i++) {
1719 np->rx_ring[i].status = 0;
1720 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1721 skb = np->rx_skbuff[i];
1723 pci_unmap_single(np->pci_dev,
1724 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1725 PCI_DMA_FROMDEVICE);
1727 np->rx_skbuff[i] = NULL;
1730 for (i = 0; i < TX_RING_SIZE; i++) {
1731 skb = np->tx_skbuff[i];
1733 pci_unmap_single(np->pci_dev,
1734 np->tx_ring[i].frag[0].addr, skb->len,
1737 np->tx_skbuff[i] = NULL;
1744 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1746 struct net_device *dev = pci_get_drvdata(pdev);
1749 struct netdev_private *np = netdev_priv(dev);
1751 unregister_netdev(dev);
1752 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1754 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1756 pci_iounmap(pdev, np->base);
1757 pci_release_regions(pdev);
1759 pci_set_drvdata(pdev, NULL);
1763 static struct pci_driver sundance_driver = {
1765 .id_table = sundance_pci_tbl,
1766 .probe = sundance_probe1,
1767 .remove = __devexit_p(sundance_remove1),
1770 static int __init sundance_init(void)
1772 /* when a module, this is printed whether or not devices are found in probe */
1776 return pci_module_init(&sundance_driver);
1779 static void __exit sundance_exit(void)
1781 pci_unregister_driver(&sundance_driver);
1784 module_init(sundance_init);
1785 module_exit(sundance_exit);