1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
30 /* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
35 static const int multicast_filter_limit = 32;
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
41 static int rx_copybreak;
42 static int flowctrl=1;
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
57 static char *media[MAX_UNITS];
60 /* Operational parameters that are set at compile time. */
62 /* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
67 Large receive rings merely waste memory. */
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
/* Byte sizes of the whole Tx/Rx descriptor rings, used for the DMA-
   coherent allocations in the probe routine.  Parenthesize the whole
   expansion so the macros are safe in any expression context. */
72 #define TX_TOTAL_SIZE	(TX_RING_SIZE*sizeof(struct netdev_desc))
73 #define RX_TOTAL_SIZE	(RX_RING_SIZE*sizeof(struct netdev_desc))
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/pci.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/skbuff.h>
93 #include <linux/init.h>
94 #include <linux/bitops.h>
95 #include <asm/uaccess.h>
96 #include <asm/processor.h> /* Processor type for cache alignment. */
98 #include <linux/delay.h>
99 #include <linux/spinlock.h>
100 #ifndef _COMPAT_WITH_OLD_KERNEL
101 #include <linux/crc32.h>
102 #include <linux/ethtool.h>
103 #include <linux/mii.h>
111 /* These identify the driver base version and may not be removed. */
112 static char version[] =
113 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
115 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
116 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
117 MODULE_LICENSE("GPL");
119 module_param(debug, int, 0);
120 module_param(rx_copybreak, int, 0);
121 module_param_array(media, charp, NULL, 0);
122 module_param(flowctrl, int, 0);
123 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
124 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
125 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
130 I. Board Compatibility
132 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
134 II. Board-specific settings
136 III. Driver operation
140 This driver uses two statically allocated fixed-size descriptor lists
141 formed into rings by a branch from the final descriptor to the beginning of
142 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
143 Some chips explicitly use only 2^N sized rings, while others use a
144 'next descriptor' pointer that the driver forms into rings.
146 IIIb/c. Transmit/Receive Structure
148 This driver uses a zero-copy receive and transmit scheme.
149 The driver allocates full frame size skbuffs for the Rx ring buffers at
150 open() time and passes the skb->data field to the chip as receive data
151 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
152 a fresh skbuff is allocated and the frame is copied to the new skbuff.
153 When the incoming frame is larger, the skbuff is passed directly up the
154 protocol stack. Buffers consumed this way are replaced by newly allocated
155 skbuffs in a later phase of receives.
157 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
158 using a full-sized skbuff for small frames vs. the copying costs of larger
159 frames. New boards are typically used in generously configured machines
160 and the underfilled buffers have negligible impact compared to the benefit of
161 a single allocation size, so the default value of zero results in never
162 copying packets. When copying is done, the cost is usually mitigated by using
163 a combined copy/checksum routine. Copying also preloads the cache, which is
164 most useful with small frames.
166 A subtle aspect of the operation is that the IP header at offset 14 in an
167 ethernet frame isn't longword aligned for further processing.
168 Unaligned buffers are permitted by the Sundance hardware, so
169 frames are received into the skbuff at an offset of "+2", 16-byte aligning
172 IIId. Synchronization
174 The driver runs as two independent, single-threaded flows of control. One
175 is the send-packet routine, which enforces single-threaded use by the
176 dev->tbusy flag. The other thread is the interrupt handler, which is single
177 threaded by the hardware and interrupt handling software.
179 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
180 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
181 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
182 the 'lp->tx_full' flag.
184 The interrupt handler has exclusive control over the Rx ring and records stats
185 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
186 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
187 clears both the tx_full and tbusy flags.
193 The Sundance ST201 datasheet, preliminary version.
194 The Kendin KS8723 datasheet, preliminary version.
195 The ICplus IP100 datasheet, preliminary version.
196 http://www.scyld.com/expert/100mbps.html
197 http://www.scyld.com/expert/NWay.html
203 /* Work-around for Kendin chip bugs. */
204 #ifndef CONFIG_SUNDANCE_MMIO
208 static const struct pci_device_id sundance_pci_tbl[] = {
209 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
210 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
211 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
212 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
213 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
214 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
215 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
218 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
227 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
228 {"D-Link DFE-550TX FAST Ethernet Adapter"},
229 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
230 {"D-Link DFE-580TX 4 port Server Adapter"},
231 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
232 {"D-Link DL10050-based FAST Ethernet Adapter"},
233 {"Sundance Technology Alta"},
234 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
235 { } /* terminate list. */
238 /* This driver was written to use PCI memory space, however x86-oriented
239 hardware often uses I/O space accesses. */
241 /* Offsets to the device registers.
242 Unlike software-only systems, device drivers interact with complex hardware.
243 It's not useful to define symbolic names for every register bit in the
244 device. The name can only partially document the semantics and make
245 the driver longer and more difficult to read.
246 In general, only the important configuration values or bits changed
247 multiple times should be defined symbolically.
252 TxDMABurstThresh = 0x08,
253 TxDMAUrgentThresh = 0x09,
254 TxDMAPollPeriod = 0x0a,
259 RxDMABurstThresh = 0x14,
260 RxDMAUrgentThresh = 0x15,
261 RxDMAPollPeriod = 0x16,
280 MulticastFilter0 = 0x60,
281 MulticastFilter1 = 0x64,
288 StatsCarrierError = 0x74,
289 StatsLateColl = 0x75,
290 StatsMultiColl = 0x76,
294 StatsTxXSDefer = 0x7a,
300 /* Aliased and bogus values! */
303 enum ASICCtrl_HiWord_bit {
304 GlobalReset = 0x0001,
309 NetworkReset = 0x0020,
314 /* Bits in the interrupt status/mask registers. */
315 enum intr_status_bits {
316 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
317 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
319 StatsMax=0x0080, LinkChange=0x0100,
320 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
323 /* Bits in the RxMode register. */
325 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
326 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
328 /* Bits in MACCtrl. */
329 enum mac_ctrl0_bits {
330 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
331 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
333 enum mac_ctrl1_bits {
334 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
335 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
336 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
339 /* The Rx and Tx buffer descriptors. */
340 /* Note that using only 32 bit fields simplifies conversion to big-endian
345 struct desc_frag { __le32 addr, length; } frag[1];
348 /* Bits in netdev_desc.status */
349 enum desc_status_bits {
351 DescEndPacket=0x4000,
355 DescIntrOnDMADone=0x80000000,
356 DisableAlign = 0x00000001,
359 #define PRIV_ALIGN 15 /* Required alignment mask */
360 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
361 within the structure. */
363 struct netdev_private {
364 /* Descriptor rings first for alignment. */
365 struct netdev_desc *rx_ring;
366 struct netdev_desc *tx_ring;
367 struct sk_buff* rx_skbuff[RX_RING_SIZE];
368 struct sk_buff* tx_skbuff[TX_RING_SIZE];
369 dma_addr_t tx_ring_dma;
370 dma_addr_t rx_ring_dma;
371 struct net_device_stats stats;
372 struct timer_list timer; /* Media monitoring timer. */
373 /* Frequently used values: keep some adjacent for cache effect. */
375 spinlock_t rx_lock; /* Group with Tx control cache line. */
378 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
379 unsigned int rx_buf_sz; /* Based on MTU+slack. */
380 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
381 unsigned int cur_tx, dirty_tx;
382 /* These values are keep track of the transceiver/media in use. */
383 unsigned int flowctrl:1;
384 unsigned int default_port:4; /* Last dev->if_port value. */
385 unsigned int an_enable:1;
387 struct tasklet_struct rx_tasklet;
388 struct tasklet_struct tx_tasklet;
391 /* Multicast and receive mode. */
392 spinlock_t mcastlock; /* SMP lock multicast updates. */
394 /* MII transceiver section. */
395 struct mii_if_info mii_if;
396 int mii_preamble_required;
397 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
398 struct pci_dev *pci_dev;
402 /* The station address location in the EEPROM. */
403 #define EEPROM_SA_OFFSET 0x10
404 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
405 IntrDrvRqst | IntrTxDone | StatsMax | \
408 static int change_mtu(struct net_device *dev, int new_mtu);
409 static int eeprom_read(void __iomem *ioaddr, int location);
410 static int mdio_read(struct net_device *dev, int phy_id, int location);
411 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
412 static int mdio_wait_link(struct net_device *dev, int wait);
413 static int netdev_open(struct net_device *dev);
414 static void check_duplex(struct net_device *dev);
415 static void netdev_timer(unsigned long data);
416 static void tx_timeout(struct net_device *dev);
417 static void init_ring(struct net_device *dev);
418 static int start_tx(struct sk_buff *skb, struct net_device *dev);
419 static int reset_tx (struct net_device *dev);
420 static irqreturn_t intr_handler(int irq, void *dev_instance);
421 static void rx_poll(unsigned long data);
422 static void tx_poll(unsigned long data);
423 static void refill_rx (struct net_device *dev);
424 static void netdev_error(struct net_device *dev, int intr_status);
426 static void set_rx_mode(struct net_device *dev);
427 static int __set_mac_addr(struct net_device *dev);
428 static struct net_device_stats *get_stats(struct net_device *dev);
429 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
430 static int netdev_close(struct net_device *dev);
431 static const struct ethtool_ops ethtool_ops;
/*
 * Soft-reset the chip: OR the requested reset bits (in the high word of
 * ASICCtrl) into the register, then poll until ResetBusy clears,
 * warning if the reset never completes.
 */
433 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
435 struct netdev_private *np = netdev_priv(dev);
436 void __iomem *ioaddr = np->base + ASICCtrl;
439 /* ST201 documentation states ASICCtrl is a 32bit register */
440 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
441 /* ST201 documentation states reset can take up to 1 ms */
/* NOTE(review): the initialization of 'countdown' is not visible in this
   chunk — presumably a small bounded loop count; confirm in full source. */
443 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
444 if (--countdown == 0) {
445 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
/* net_device_ops vtable: wires the stack's entry points to this
 * driver's handlers; MAC-address set/validate use the generic
 * etherdevice helpers. */
452 static const struct net_device_ops netdev_ops = {
453 .ndo_open = netdev_open,
454 .ndo_stop = netdev_close,
455 .ndo_start_xmit = start_tx,
456 .ndo_get_stats = get_stats,
457 .ndo_set_multicast_list = set_rx_mode,
458 .ndo_do_ioctl = netdev_ioctl,
459 .ndo_tx_timeout = tx_timeout,
460 .ndo_change_mtu = change_mtu,
461 .ndo_set_mac_address = eth_mac_addr,
462 .ndo_validate_addr = eth_validate_addr,
465 static int __devinit sundance_probe1 (struct pci_dev *pdev,
466 const struct pci_device_id *ent)
468 struct net_device *dev;
469 struct netdev_private *np;
471 int chip_idx = ent->driver_data;
474 void __iomem *ioaddr;
483 int phy, phy_end, phy_idx = 0;
485 /* when built into the kernel, we only print version if device is found */
487 static int printed_version;
488 if (!printed_version++)
492 if (pci_enable_device(pdev))
494 pci_set_master(pdev);
498 dev = alloc_etherdev(sizeof(*np));
501 SET_NETDEV_DEV(dev, &pdev->dev);
503 if (pci_request_regions(pdev, DRV_NAME))
506 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
510 for (i = 0; i < 3; i++)
511 ((__le16 *)dev->dev_addr)[i] =
512 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
513 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
515 dev->base_addr = (unsigned long)ioaddr;
518 np = netdev_priv(dev);
521 np->chip_id = chip_idx;
522 np->msg_enable = (1 << debug) - 1;
523 spin_lock_init(&np->lock);
524 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
525 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
527 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
529 goto err_out_cleardev;
530 np->tx_ring = (struct netdev_desc *)ring_space;
531 np->tx_ring_dma = ring_dma;
533 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
535 goto err_out_unmap_tx;
536 np->rx_ring = (struct netdev_desc *)ring_space;
537 np->rx_ring_dma = ring_dma;
539 np->mii_if.dev = dev;
540 np->mii_if.mdio_read = mdio_read;
541 np->mii_if.mdio_write = mdio_write;
542 np->mii_if.phy_id_mask = 0x1f;
543 np->mii_if.reg_num_mask = 0x1f;
545 /* The chip-specific entries in the device structure. */
546 dev->netdev_ops = &netdev_ops;
547 SET_ETHTOOL_OPS(dev, &ethtool_ops);
548 dev->watchdog_timeo = TX_TIMEOUT;
550 pci_set_drvdata(pdev, dev);
552 i = register_netdev(dev);
554 goto err_out_unmap_rx;
556 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
557 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
560 np->phys[0] = 1; /* Default setting */
561 np->mii_preamble_required++;
564 * It seems some phys doesn't deal well with address 0 being accessed
567 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
572 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
574 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
575 int phyx = phy & 0x1f;
576 int mii_status = mdio_read(dev, phyx, MII_BMSR);
577 if (mii_status != 0xffff && mii_status != 0x0000) {
578 np->phys[phy_idx++] = phyx;
579 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
580 if ((mii_status & 0x0040) == 0)
581 np->mii_preamble_required++;
582 printk(KERN_INFO "%s: MII PHY found at address %d, status "
583 "0x%4.4x advertising %4.4x.\n",
584 dev->name, phyx, mii_status, np->mii_if.advertising);
587 np->mii_preamble_required--;
590 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
591 dev->name, ioread32(ioaddr + ASICCtrl));
592 goto err_out_unregister;
595 np->mii_if.phy_id = np->phys[0];
597 /* Parse override configuration */
599 if (card_idx < MAX_UNITS) {
600 if (media[card_idx] != NULL) {
602 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
603 strcmp (media[card_idx], "4") == 0) {
605 np->mii_if.full_duplex = 1;
606 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
607 || strcmp (media[card_idx], "3") == 0) {
609 np->mii_if.full_duplex = 0;
610 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
611 strcmp (media[card_idx], "2") == 0) {
613 np->mii_if.full_duplex = 1;
614 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
615 strcmp (media[card_idx], "1") == 0) {
617 np->mii_if.full_duplex = 0;
627 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
628 /* Default 100Mbps Full */
631 np->mii_if.full_duplex = 1;
636 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
638 /* If flow control enabled, we need to advertise it.*/
640 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
641 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
642 /* Force media type */
643 if (!np->an_enable) {
645 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
646 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
647 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
648 printk (KERN_INFO "Override speed=%d, %s duplex\n",
649 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
653 /* Perhaps move the reset here? */
654 /* Reset the chip to erase previous misconfiguration. */
655 if (netif_msg_hw(np))
656 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
657 sundance_reset(dev, 0x00ff << 16);
658 if (netif_msg_hw(np))
659 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
665 unregister_netdev(dev);
667 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
669 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
671 pci_set_drvdata(pdev, NULL);
672 pci_iounmap(pdev, ioaddr);
674 pci_release_regions(pdev);
680 static int change_mtu(struct net_device *dev, int new_mtu)
682 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
684 if (netif_running(dev))
690 #define eeprom_delay(ee_addr) ioread32(ee_addr)
691 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
692 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
694 int boguscnt = 10000; /* Typical 1900 ticks. */
695 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
697 eeprom_delay(ioaddr + EECtrl);
698 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
699 return ioread16(ioaddr + EEData);
701 } while (--boguscnt > 0);
705 /* MII transceiver control section.
706 Read and write the MII registers using software-generated serial
707 MDIO protocol. See the MII specifications or DP83840A data sheet
710 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
711 met by back-to-back 33Mhz PCI cycles. */
712 #define mdio_delay() ioread8(mdio_addr)
715 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
717 #define MDIO_EnbIn (0)
718 #define MDIO_WRITE0 (MDIO_EnbOutput)
719 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
721 /* Generate the preamble required for initial synchronization and
722 a few older transceivers. */
723 static void mdio_sync(void __iomem *mdio_addr)
727 /* Establish sync by sending at least 32 logic ones. */
728 while (--bits >= 0) {
729 iowrite8(MDIO_WRITE1, mdio_addr);
731 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
/*
 * Read one MII register by bit-banging the MDIO protocol through the
 * MIICtrl register.  Sends the optional preamble, shifts out the 16-bit
 * read command, then clocks in the turnaround + 16 data bits.
 * Returns the 16-bit register value.
 */
736 static int mdio_read(struct net_device *dev, int phy_id, int location)
738 struct netdev_private *np = netdev_priv(dev);
739 void __iomem *mdio_addr = np->base + MIICtrl;
/* 0xf6<<10 encodes start + read opcode ahead of the PHY/register fields. */
740 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
743 if (np->mii_preamble_required)
744 mdio_sync(mdio_addr);
746 /* Shift the read command bits out. */
747 for (i = 15; i >= 0; i--) {
748 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
750 iowrite8(dataval, mdio_addr);
752 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
755 /* Read the two transition, 16 data, and wire-idle bits. */
756 for (i = 19; i > 0; i--) {
757 iowrite8(MDIO_EnbIn, mdio_addr);
759 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
760 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
/* Drop the trailing idle bit and mask to the 16-bit register width. */
763 return (retval>>1) & 0xffff;
/*
 * Write one MII register by bit-banging MDIO through MIICtrl: optional
 * preamble, then the full 32-bit command word (start/opcode, PHY id,
 * register number, 16-bit value), then two idle clocks.
 */
766 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
768 struct netdev_private *np = netdev_priv(dev);
769 void __iomem *mdio_addr = np->base + MIICtrl;
/* 0x5002<<16 encodes start + write opcode + turnaround bits. */
770 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
773 if (np->mii_preamble_required)
774 mdio_sync(mdio_addr);
776 /* Shift the command bits out. */
777 for (i = 31; i >= 0; i--) {
778 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
780 iowrite8(dataval, mdio_addr);
782 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
785 /* Clear out extra bits. */
786 for (i = 2; i > 0; i--) {
787 iowrite8(MDIO_EnbIn, mdio_addr);
789 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
795 static int mdio_wait_link(struct net_device *dev, int wait)
799 struct netdev_private *np;
801 np = netdev_priv(dev);
802 phy_id = np->phys[0];
805 bmsr = mdio_read(dev, phy_id, MII_BMSR);
809 } while (--wait > 0);
813 static int netdev_open(struct net_device *dev)
815 struct netdev_private *np = netdev_priv(dev);
816 void __iomem *ioaddr = np->base;
820 /* Do we need to reset the chip??? */
822 i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
826 if (netif_msg_ifup(np))
827 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
828 dev->name, dev->irq);
831 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
832 /* The Tx list pointer is written as packets are queued. */
834 /* Initialize other registers. */
836 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
837 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
839 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
842 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
844 /* Configure the PCI bus bursts and FIFO thresholds. */
846 if (dev->if_port == 0)
847 dev->if_port = np->default_port;
849 spin_lock_init(&np->mcastlock);
852 iowrite16(0, ioaddr + IntrEnable);
853 iowrite16(0, ioaddr + DownCounter);
854 /* Set the chip to poll every N*320nsec. */
855 iowrite8(100, ioaddr + RxDMAPollPeriod);
856 iowrite8(127, ioaddr + TxDMAPollPeriod);
857 /* Fix DFE-580TX packet drop issue */
858 if (np->pci_dev->revision >= 0x14)
859 iowrite8(0x01, ioaddr + DebugCtrl1);
860 netif_start_queue(dev);
862 spin_lock_irqsave(&np->lock, flags);
864 spin_unlock_irqrestore(&np->lock, flags);
866 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
868 if (netif_msg_ifup(np))
869 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
870 "MAC Control %x, %4.4x %4.4x.\n",
871 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
872 ioread32(ioaddr + MACCtrl0),
873 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
875 /* Set the timer to check for link beat. */
876 init_timer(&np->timer);
877 np->timer.expires = jiffies + 3*HZ;
878 np->timer.data = (unsigned long)dev;
879 np->timer.function = &netdev_timer; /* timer handler */
880 add_timer(&np->timer);
882 /* Enable interrupts by setting the interrupt mask. */
883 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
/*
 * Re-evaluate the negotiated duplex from the link partner ability
 * register and program MACCtrl0 to match.  With autonegotiation off
 * (or an unreadable PHY) the forced full_duplex setting is applied.
 */
888 static void check_duplex(struct net_device *dev)
890 struct netdev_private *np = netdev_priv(dev);
891 void __iomem *ioaddr = np->base;
892 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
893 int negotiated = mii_lpa & np->mii_if.advertising;
897 if (!np->an_enable || mii_lpa == 0xffff) {
898 if (np->mii_if.full_duplex)
899 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
904 /* Autonegotiation */
/* Full duplex iff 100TX-FD negotiated, or 10T-FD with no 100 ability. */
905 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
906 if (np->mii_if.full_duplex != duplex) {
907 np->mii_if.full_duplex = duplex;
908 if (netif_msg_link(np))
909 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
910 "negotiated capability %4.4x.\n", dev->name,
911 duplex ? "full" : "half", np->phys[0], negotiated);
/* Parenthesize the conditional: '|' binds tighter than '?:', so the
 * unparenthesized form wrote 0x20 or 0 instead of OR-ing the duplex
 * bit into the current MACCtrl0 value. */
912 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
916 static void netdev_timer(unsigned long data)
918 struct net_device *dev = (struct net_device *)data;
919 struct netdev_private *np = netdev_priv(dev);
920 void __iomem *ioaddr = np->base;
921 int next_tick = 10*HZ;
923 if (netif_msg_timer(np)) {
924 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
926 dev->name, ioread16(ioaddr + IntrEnable),
927 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
930 np->timer.expires = jiffies + next_tick;
931 add_timer(&np->timer);
934 static void tx_timeout(struct net_device *dev)
936 struct netdev_private *np = netdev_priv(dev);
937 void __iomem *ioaddr = np->base;
940 netif_stop_queue(dev);
941 tasklet_disable(&np->tx_tasklet);
942 iowrite16(0, ioaddr + IntrEnable);
943 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
945 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
946 ioread8(ioaddr + TxFrameId));
950 for (i=0; i<TX_RING_SIZE; i++) {
951 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
952 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
953 le32_to_cpu(np->tx_ring[i].next_desc),
954 le32_to_cpu(np->tx_ring[i].status),
955 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
956 le32_to_cpu(np->tx_ring[i].frag[0].addr),
957 le32_to_cpu(np->tx_ring[i].frag[0].length));
959 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
960 ioread32(np->base + TxListPtr),
961 netif_queue_stopped(dev));
962 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
963 np->cur_tx, np->cur_tx % TX_RING_SIZE,
964 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
965 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
966 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
968 spin_lock_irqsave(&np->lock, flag);
970 /* Stop and restart the chip's Tx processes . */
972 spin_unlock_irqrestore(&np->lock, flag);
976 dev->trans_start = jiffies;
977 np->stats.tx_errors++;
978 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
979 netif_wake_queue(dev);
981 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
982 tasklet_enable(&np->tx_tasklet);
986 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
987 static void init_ring(struct net_device *dev)
989 struct netdev_private *np = netdev_priv(dev);
992 np->cur_rx = np->cur_tx = 0;
993 np->dirty_rx = np->dirty_tx = 0;
/* Standard 1536-byte buffers unless the MTU demands more headroom. */
996 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
998 /* Initialize all Rx descriptors. */
999 for (i = 0; i < RX_RING_SIZE; i++) {
/* Chain next_desc pointers into a ring (last entry wraps to first). */
1000 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1001 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1002 np->rx_ring[i].status = 0;
1003 np->rx_ring[i].frag[0].length = 0;
1004 np->rx_skbuff[i] = NULL;
1007 /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1008 for (i = 0; i < RX_RING_SIZE; i++) {
1009 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1010 np->rx_skbuff[i] = skb;
1013 skb->dev = dev; /* Mark as being used by this device. */
1014 skb_reserve(skb, 2); /* 16 byte align the IP header. */
/* DMA-map the data buffer and publish it to the descriptor. */
1015 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1016 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1017 PCI_DMA_FROMDEVICE));
1018 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
/* dirty_rx trails cur_rx by the number of unfilled entries (0 on success). */
1020 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/* Tx ring starts empty; descriptors are built in start_tx(). */
1022 for (i = 0; i < TX_RING_SIZE; i++) {
1023 np->tx_skbuff[i] = NULL;
1024 np->tx_ring[i].status = 0;
/*
 * Tasklet body scheduled from start_tx(): link every descriptor queued
 * since the last pass into the hardware chain, flag the newest one for
 * a Tx-done interrupt, and kick the DMA engine if it has gone idle.
 */
1029 static void tx_poll (unsigned long data)
1031 struct net_device *dev = (struct net_device *)data;
1032 struct netdev_private *np = netdev_priv(dev);
1033 unsigned head = np->cur_task % TX_RING_SIZE;
/* Default to the most recently queued descriptor in case none are pending. */
1034 struct netdev_desc *txdesc =
1035 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1037 /* Chain the next pointer */
1038 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1039 int entry = np->cur_task % TX_RING_SIZE;
1040 txdesc = &np->tx_ring[entry];
1042 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1043 entry*sizeof(struct netdev_desc));
1045 np->last_tx = txdesc;
1047 /* Indicate the latest descriptor of tx ring */
1048 txdesc->status |= cpu_to_le32(DescIntrOnTx);
/* TxListPtr == 0 means the Tx DMA engine is idle; restart it at 'head'. */
1050 if (ioread32 (np->base + TxListPtr) == 0)
1051 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1052 np->base + TxListPtr);
/*
 * ndo_start_xmit handler: claim the next ring slot, build a single-
 * fragment descriptor for the skb (DMA-mapped), then schedule tx_poll()
 * to chain it to the hardware.  Stops the queue when the ring is
 * nearly full.
 */
1057 start_tx (struct sk_buff *skb, struct net_device *dev)
1059 struct netdev_private *np = netdev_priv(dev);
1060 struct netdev_desc *txdesc;
1063 /* Calculate the next Tx descriptor entry. */
1064 entry = np->cur_tx % TX_RING_SIZE;
1065 np->tx_skbuff[entry] = skb;
1066 txdesc = &np->tx_ring[entry];
1068 txdesc->next_desc = 0;
/* Encode the slot number in the status word; DisableAlign per chip spec. */
1069 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1070 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1073 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1075 /* Increment cur_tx before tasklet_schedule() */
1078 /* Schedule a tx_poll() task */
1079 tasklet_schedule(&np->tx_tasklet);
1081 /* On some architectures: explicitly flush cache lines here. */
/* Keep the queue awake while there is still room; else throttle. */
1082 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1083 && !netif_queue_stopped(dev)) {
1086 netif_stop_queue (dev);
1088 dev->trans_start = jiffies;
1089 if (netif_msg_tx_queued(np)) {
1091 "%s: Transmit frame #%d queued in slot %d.\n",
1092 dev->name, np->cur_tx, entry);
1097 /* Reset hardware tx and free all of tx buffers */
1099 reset_tx (struct net_device *dev)
1101 struct netdev_private *np = netdev_priv(dev);
1102 void __iomem *ioaddr = np->base;
1103 struct sk_buff *skb;
/* Pick the irq-safe vs. normal kfree variant below by context. */
1105 int irq = in_interrupt();
1107 /* Reset tx logic, TxListPtr will be cleaned */
1108 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1109 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1111 /* free all tx skbuff */
1112 for (i = 0; i < TX_RING_SIZE; i++) {
1113 np->tx_ring[i].next_desc = 0;
1115 skb = np->tx_skbuff[i];
1117 pci_unmap_single(np->pci_dev,
1118 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1119 skb->len, PCI_DMA_TODEVICE);
1121 dev_kfree_skb_irq (skb);
1123 dev_kfree_skb (skb);
1124 np->tx_skbuff[i] = NULL;
/* Every in-flight frame is discarded by the reset; count them. */
1125 np->stats.tx_dropped++;
1128 np->cur_tx = np->dirty_tx = 0;
/* Re-arm Tx DMA polling and re-enable the MAC after the reset. */
1132 iowrite8(127, ioaddr + TxDMAPollPeriod);
1134 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1138 /* The interrupt handler cleans up after the Tx thread,
1139 and schedule a Rx thread work */
/* Interrupt service routine. Acks the interrupt, defers Rx work to the
 * rx tasklet, drains TxStatus handling transmit errors (including a
 * FIFO-reset recovery for TxUnderrun), reaps completed Tx descriptors,
 * and wakes the queue when ring space frees up.
 * NOTE(review): the surrounding do/while loop, `handled`/`tx_cnt`/
 * `tx_status`/frame-id declarations and several closing braces are
 * missing from this extract — verify against the full source. */
1140 static irqreturn_t intr_handler(int irq, void *dev_instance)
1142 struct net_device *dev = (struct net_device *)dev_instance;
1143 struct netdev_private *np = netdev_priv(dev);
1144 void __iomem *ioaddr = np->base;
/* Read-then-writeback acknowledges all currently pending sources. */
1153 int intr_status = ioread16(ioaddr + IntrStatus);
1154 iowrite16(intr_status, ioaddr + IntrStatus);
1156 if (netif_msg_intr(np))
1157 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1158 dev->name, intr_status);
/* Not ours / nothing interesting pending. */
1160 if (!(intr_status & DEFAULT_INTR))
/* Rx: mask further Rx interrupts and let the tasklet do the work with a
 * fresh budget. */
1165 if (intr_status & (IntrRxDMADone)) {
1166 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1167 ioaddr + IntrEnable);
1169 np->budget = RX_BUDGET;
1170 tasklet_schedule(&np->rx_tasklet);
/* Tx: drain TxStatus entries (bit 0x80 = valid), bounded to 32 reads. */
1172 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1173 tx_status = ioread16 (ioaddr + TxStatus);
1174 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1175 if (netif_msg_tx_done(np))
1177 ("%s: Transmit status is %2.2x.\n",
1178 dev->name, tx_status);
/* Any of bits 1-4 set means a transmit error occurred. */
1179 if (tx_status & 0x1e) {
1180 if (netif_msg_tx_err(np))
1181 printk("%s: Transmit error status %4.4x.\n",
1182 dev->name, tx_status);
1183 np->stats.tx_errors++;
1184 if (tx_status & 0x10)
1185 np->stats.tx_fifo_errors++;
1186 if (tx_status & 0x08)
1187 np->stats.collisions++;
1188 if (tx_status & 0x04)
1189 np->stats.tx_fifo_errors++;
1190 if (tx_status & 0x02)
1191 np->stats.tx_window_errors++;
1194 ** This reset has been verified on
1195 ** DFE-580TX boards ! phdm@macqel.be.
1197 if (tx_status & 0x10) { /* TxUnderrun */
1198 /* Restart Tx FIFO and transmitter */
1199 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1200 /* No need to reset the Tx pointer here */
1202 /* Restart the Tx. Need to make sure tx enabled */
1205 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1206 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1211 /* Yup, this is a documentation bug. It cost me *hours*. */
1212 iowrite16 (0, ioaddr + TxStatus);
1214 iowrite32(5000, ioaddr + DownCounter);
1217 tx_status = ioread16 (ioaddr + TxStatus);
/* Frame id of the last completed frame: high byte of TxStatus on newer
 * parts, the TxFrameId register otherwise. */
1219 hw_frame_id = (tx_status >> 8) & 0xff;
1221 hw_frame_id = ioread8(ioaddr + TxFrameId);
/* Chip revision >= 0x14: reap by matching the per-descriptor frame id
 * (stashed in status bits 9:2 by start_tx) against hw_frame_id. */
1224 if (np->pci_dev->revision >= 0x14) {
1225 spin_lock(&np->lock);
1226 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1227 int entry = np->dirty_tx % TX_RING_SIZE;
1228 struct sk_buff *skb;
1230 sw_frame_id = (le32_to_cpu(
1231 np->tx_ring[entry].status) >> 2) & 0xff;
1232 if (sw_frame_id == hw_frame_id &&
1233 !(le32_to_cpu(np->tx_ring[entry].status)
1236 if (sw_frame_id == (hw_frame_id + 1) %
1239 skb = np->tx_skbuff[entry];
1240 /* Free the original skb. */
1241 pci_unmap_single(np->pci_dev,
1242 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1243 skb->len, PCI_DMA_TODEVICE);
1244 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1245 np->tx_skbuff[entry] = NULL;
1246 np->tx_ring[entry].frag[0].addr = 0;
1247 np->tx_ring[entry].frag[0].length = 0;
1249 spin_unlock(&np->lock);
/* Older revisions: reap purely on the DescOwn-style completion bit. */
1251 spin_lock(&np->lock);
1252 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1253 int entry = np->dirty_tx % TX_RING_SIZE;
1254 struct sk_buff *skb;
1255 if (!(le32_to_cpu(np->tx_ring[entry].status)
1258 skb = np->tx_skbuff[entry];
1259 /* Free the original skb. */
1260 pci_unmap_single(np->pci_dev,
1261 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1262 skb->len, PCI_DMA_TODEVICE);
1263 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1264 np->tx_skbuff[entry] = NULL;
1265 np->tx_ring[entry].frag[0].addr = 0;
1266 np->tx_ring[entry].frag[0].length = 0;
1268 spin_unlock(&np->lock);
/* Re-open the queue once at least 4 slots are free again. */
1271 if (netif_queue_stopped(dev) &&
1272 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1273 /* The ring is no longer full, clear busy flag. */
1274 netif_wake_queue (dev);
1276 /* Abnormal error summary/uncommon events handlers. */
1277 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1278 netdev_error(dev, intr_status);
1280 if (netif_msg_intr(np))
1281 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1282 dev->name, ioread16(ioaddr + IntrStatus));
1283 return IRQ_RETVAL(handled);
/* Rx tasklet body. Walks the Rx ring from cur_rx, delivering completed
 * packets up the stack until the budget is exhausted or an un-owned
 * descriptor is reached. Small packets (< rx_copybreak) are copied into
 * a fresh skb so the large ring buffer can be reused; large ones are
 * handed up directly. Re-enables Rx interrupts when done, or reschedules
 * itself if the budget ran out.
 * NOTE(review): loop header, `received`/`pkt_len` declarations, netif_rx
 * call, refill_rx call and several braces are missing from this extract. */
1286 static void rx_poll(unsigned long data)
1288 struct net_device *dev = (struct net_device *)data;
1289 struct netdev_private *np = netdev_priv(dev);
1290 int entry = np->cur_rx % RX_RING_SIZE;
1291 int boguscnt = np->budget;
1292 void __iomem *ioaddr = np->base;
1295 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1297 struct netdev_desc *desc = &(np->rx_ring[entry]);
1298 u32 frame_status = le32_to_cpu(desc->status);
/* Budget exhausted: bail out and reschedule (tail of function). */
1301 if (--boguscnt < 0) {
/* Descriptor still owned by the chip: nothing more to process. */
1304 if (!(frame_status & DescOwn))
1306 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1307 if (netif_msg_rx_status(np))
1308 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
/* Error summary bits: length/FIFO/frame/CRC errors are counted. */
1310 if (frame_status & 0x001f4000) {
1311 /* There was a error. */
1312 if (netif_msg_rx_err(np))
1313 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1315 np->stats.rx_errors++;
1316 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1317 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1318 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1319 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1320 if (frame_status & 0x00100000) {
1321 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1323 dev->name, frame_status);
1326 struct sk_buff *skb;
1327 #ifndef final_version
1328 if (netif_msg_rx_status(np))
1329 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1330 ", bogus_cnt %d.\n",
1333 /* Check if the packet is long enough to accept without copying
1334 to a minimally-sized skbuff. */
1335 if (pkt_len < rx_copybreak
1336 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1337 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Copy path: sync the DMA buffer for CPU access, copy, then give it
 * back to the device so the ring entry can be reused. */
1338 pci_dma_sync_single_for_cpu(np->pci_dev,
1339 le32_to_cpu(desc->frag[0].addr),
1341 PCI_DMA_FROMDEVICE);
1343 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1344 pci_dma_sync_single_for_device(np->pci_dev,
1345 le32_to_cpu(desc->frag[0].addr),
1347 PCI_DMA_FROMDEVICE);
1348 skb_put(skb, pkt_len);
/* No-copy path: unmap the ring buffer and hand it up directly; the
 * slot is left NULL for refill_rx to replenish. */
1350 pci_unmap_single(np->pci_dev,
1351 le32_to_cpu(desc->frag[0].addr),
1353 PCI_DMA_FROMDEVICE);
1354 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1355 np->rx_skbuff[entry] = NULL;
1357 skb->protocol = eth_type_trans(skb, dev);
1358 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1361 entry = (entry + 1) % RX_RING_SIZE;
/* Normal exit: charge the budget and unmask Rx interrupts again. */
1366 np->budget -= received;
1367 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
/* Out-of-budget exit: top up the budget and run again later. */
1375 np->budget -= received;
1376 if (np->budget <= 0)
1377 np->budget = RX_BUDGET;
1378 tasklet_schedule(&np->rx_tasklet);
/* Replenish empty Rx ring slots between dirty_rx and cur_rx with freshly
 * allocated, DMA-mapped skbs and return ownership of each descriptor to
 * the chip. On allocation failure it stops early and retries next round.
 * NOTE(review): function braces and the `entry` declaration are missing
 * from this extract. */
1382 static void refill_rx (struct net_device *dev)
1384 struct netdev_private *np = netdev_priv(dev);
1388 /* Refill the Rx ring buffers. */
1389 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1390 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1391 struct sk_buff *skb;
1392 entry = np->dirty_rx % RX_RING_SIZE;
1393 if (np->rx_skbuff[entry] == NULL) {
1394 skb = dev_alloc_skb(np->rx_buf_sz);
1395 np->rx_skbuff[entry] = skb;
1397 break; /* Better luck next round. */
1398 skb->dev = dev; /* Mark as being used by this device. */
1399 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1400 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1401 pci_map_single(np->pci_dev, skb->data,
1402 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1404 /* Perhaps we need not reset this field. */
1405 np->rx_ring[entry].frag[0].length =
1406 cpu_to_le32(np->rx_buf_sz | LastFrag);
/* Clearing status hands the descriptor back to the hardware. */
1407 np->rx_ring[entry].status = 0;
/* Handle the uncommon interrupt causes: link changes (re-read the PHY
 * via MII, log the negotiated speed/duplex, program flow control),
 * statistics-counter overflow, and PCI bus errors.
 * NOTE(review): declarations (`speed`), closing braces, the autoneg
 * fixed-speed branch header, and the get_stats call in the StatsMax
 * branch are missing from this extract. */
1412 static void netdev_error(struct net_device *dev, int intr_status)
1414 struct netdev_private *np = netdev_priv(dev);
1415 void __iomem *ioaddr = np->base;
1416 u16 mii_ctl, mii_advertise, mii_lpa;
1419 if (intr_status & LinkChange) {
1420 if (mdio_wait_link(dev, 10) == 0) {
1421 printk(KERN_INFO "%s: Link up\n", dev->name);
/* Autonegotiation: intersect our advertisement with the partner's
 * abilities to find the resolved speed/duplex. */
1422 if (np->an_enable) {
1423 mii_advertise = mdio_read(dev, np->phys[0],
1425 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1426 mii_advertise &= mii_lpa;
1427 printk(KERN_INFO "%s: Link changed: ",
1429 if (mii_advertise & ADVERTISE_100FULL) {
1431 printk("100Mbps, full duplex\n");
1432 } else if (mii_advertise & ADVERTISE_100HALF) {
1434 printk("100Mbps, half duplex\n");
1435 } else if (mii_advertise & ADVERTISE_10FULL) {
1437 printk("10Mbps, full duplex\n");
1438 } else if (mii_advertise & ADVERTISE_10HALF) {
1440 printk("10Mbps, half duplex\n");
/* Forced mode: read speed/duplex straight from BMCR. */
1445 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1446 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1448 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1450 printk("%s duplex.\n",
1451 (mii_ctl & BMCR_FULLDPLX) ?
/* Full duplex + flowctrl: enable pause frames in filter and MAC. */
1455 if (np->flowctrl && np->mii_if.full_duplex) {
1456 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1457 ioaddr + MulticastFilter1+2);
1458 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1461 netif_carrier_on(dev);
1463 printk(KERN_INFO "%s: Link down\n", dev->name);
1464 netif_carrier_off(dev);
1467 if (intr_status & StatsMax) {
1470 if (intr_status & IntrPCIErr) {
1471 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1472 dev->name, intr_status);
1473 /* We must do a global reset of DMA to continue. */
/* Accumulate the chip's on-board statistics counters into np->stats and
 * return it. Reading the registers also clears them on this hardware,
 * which is why every counter is read even when the value is discarded.
 * NOTE(review): function braces, the `i` declaration, and the final
 * return statement are missing from this extract. */
1477 static struct net_device_stats *get_stats(struct net_device *dev)
1479 struct netdev_private *np = netdev_priv(dev);
1480 void __iomem *ioaddr = np->base;
1483 /* We should lock this segment of code for SMP eventually, although
1484 the vulnerability window is very small and statistics are
1486 /* The chip only need report frame silently dropped. */
1487 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1488 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1489 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1490 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1491 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1492 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1493 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
/* Drain the remaining counters so they do not saturate. */
1494 ioread8(ioaddr + StatsTxDefer);
1495 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1496 ioread8(ioaddr + i);
/* Octet counters are split into 16-bit low/high halves. */
1497 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1498 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1499 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1500 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
/* Program the chip's receive filter from dev->flags and the multicast
 * list: promiscuous, all-multicast, a 64-bit CRC hash filter, or
 * unicast+broadcast only. Also keeps the flow-control bit in filter
 * word 3 in sync with the current duplex setting.
 * NOTE(review): declarations for rx_mode/i/crc/bit/index, the early
 * return after the unicast-only branch, and braces are missing from
 * this extract. */
1505 static void set_rx_mode(struct net_device *dev)
1507 struct netdev_private *np = netdev_priv(dev);
1508 void __iomem *ioaddr = np->base;
1509 u16 mc_filter[4]; /* Multicast hash filter */
1513 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1514 memset(mc_filter, 0xff, sizeof(mc_filter));
1515 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1516 } else if ((dev->mc_count > multicast_filter_limit)
1517 || (dev->flags & IFF_ALLMULTI)) {
1518 /* Too many to match, or accept all multicasts. */
1519 memset(mc_filter, 0xff, sizeof(mc_filter));
1520 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1521 } else if (dev->mc_count) {
1522 struct dev_mc_list *mclist;
1526 memset (mc_filter, 0, sizeof (mc_filter));
1527 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1528 i++, mclist = mclist->next) {
/* Hash = bit-reversed low 6 bits of the little-endian CRC32 of the
 * MAC address; selects one of 64 filter bits. */
1529 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1530 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1531 if (crc & 0x80000000) index |= 1 << bit;
1532 mc_filter[index/16] |= (1 << (index % 16));
1534 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
/* No multicast addresses: unicast + broadcast only. */
1536 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
/* Bit 0x0200 of filter word 3 doubles as the pause-frame enable. */
1539 if (np->mii_if.full_duplex && np->flowctrl)
1540 mc_filter[3] |= 0x0200;
1542 for (i = 0; i < 4; i++)
1543 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1544 iowrite8(rx_mode, ioaddr + RxMode);
/* Write dev->dev_addr into the chip's StationAddr registers as three
 * little-endian 16-bit halves.
 * NOTE(review): braces, the `addr16` declaration, and the return value
 * are missing from this extract. */
1547 static int __set_mac_addr(struct net_device *dev)
1549 struct netdev_private *np = netdev_priv(dev);
1552 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1553 iowrite16(addr16, np->base + StationAddr);
1554 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1555 iowrite16(addr16, np->base + StationAddr+2);
1556 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1557 iowrite16(addr16, np->base + StationAddr+4);
/* ethtool_ops.begin hook: refuse ethtool operations while the interface
 * is down. NOTE(review): the return statements are missing from this
 * extract — presumably -EINVAL when not running, 0 otherwise. */
1561 static int check_if_running(struct net_device *dev)
1563 if (!netif_running(dev))
/* ethtool get_drvinfo: report driver name, version, and PCI bus id. */
1568 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1570 struct netdev_private *np = netdev_priv(dev);
1571 strcpy(info->driver, DRV_NAME);
1572 strcpy(info->version, DRV_VERSION);
1573 strcpy(info->bus_info, pci_name(np->pci_dev));
/* ethtool get_settings: delegate to the generic MII helper under the
 * device lock. NOTE(review): the `return 0;` line is missing from this
 * extract. */
1576 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1578 struct netdev_private *np = netdev_priv(dev);
1579 spin_lock_irq(&np->lock);
1580 mii_ethtool_gset(&np->mii_if, ecmd);
1581 spin_unlock_irq(&np->lock);
/* ethtool set_settings: delegate to the generic MII helper under the
 * device lock and propagate its result. NOTE(review): the `int res;`
 * declaration and `return res;` lines are missing from this extract. */
1585 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1587 struct netdev_private *np = netdev_priv(dev);
1589 spin_lock_irq(&np->lock);
1590 res = mii_ethtool_sset(&np->mii_if, ecmd);
1591 spin_unlock_irq(&np->lock);
/* ethtool nway_reset: restart PHY autonegotiation via the MII library. */
1595 static int nway_reset(struct net_device *dev)
1597 struct netdev_private *np = netdev_priv(dev);
1598 return mii_nway_restart(&np->mii_if);
/* ethtool get_link: report PHY link status via the MII library. */
1601 static u32 get_link(struct net_device *dev)
1603 struct netdev_private *np = netdev_priv(dev);
1604 return mii_link_ok(&np->mii_if);
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
1607 static u32 get_msglevel(struct net_device *dev)
1609 struct netdev_private *np = netdev_priv(dev);
1610 return np->msg_enable;
/* ethtool set_msglevel: set the driver's message-enable bitmask. */
1613 static void set_msglevel(struct net_device *dev, u32 val)
1615 struct netdev_private *np = netdev_priv(dev);
1616 np->msg_enable = val;
/* ethtool operation table wiring the handlers above into the core.
 * .begin gates every operation on the interface being up. */
1619 static const struct ethtool_ops ethtool_ops = {
1620 .begin = check_if_running,
1621 .get_drvinfo = get_drvinfo,
1622 .get_settings = get_settings,
1623 .set_settings = set_settings,
1624 .nway_reset = nway_reset,
1625 .get_link = get_link,
1626 .get_msglevel = get_msglevel,
1627 .set_msglevel = set_msglevel,
/* SIOCxMIIxxx ioctl handler: forwards MII register access to the generic
 * MII layer under the device lock; rejected when the interface is down.
 * NOTE(review): the `int rc;`, the error return for !netif_running, and
 * `return rc;` are missing from this extract. */
1630 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1632 struct netdev_private *np = netdev_priv(dev);
1635 if (!netif_running(dev))
1638 spin_lock_irq(&np->lock);
1639 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1640 spin_unlock_irq(&np->lock);
/* ndo_stop: orderly shutdown. Kills the tasklets, quiesces the queue,
 * masks interrupts, stops Rx/Tx DMA and the MAC, issues a global reset,
 * optionally dumps the rings for debugging, frees the IRQ and timer,
 * then releases every skb still held in the Rx and Tx rings.
 * NOTE(review): braces, the `i` declaration, delay loops' bodies, the
 * if (skb) guards before the unmaps, dev_kfree_skb calls, and the final
 * return are missing from this extract. */
1645 static int netdev_close(struct net_device *dev)
1647 struct netdev_private *np = netdev_priv(dev);
1648 void __iomem *ioaddr = np->base;
1649 struct sk_buff *skb;
1652 /* Wait and kill tasklet */
1653 tasklet_kill(&np->rx_tasklet);
1654 tasklet_kill(&np->tx_tasklet);
1660 netif_stop_queue(dev);
1662 if (netif_msg_ifdown(np)) {
1663 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1664 "Rx %4.4x Int %2.2x.\n",
1665 dev->name, ioread8(ioaddr + TxStatus),
1666 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1667 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1668 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1671 /* Disable interrupts by clearing the interrupt mask. */
1672 iowrite16(0x0000, ioaddr + IntrEnable);
1674 /* Disable Rx and Tx DMA for safely release resource */
1675 iowrite32(0x500, ioaddr + DMACtrl);
1677 /* Stop the chip's Tx and Rx processes. */
1678 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
/* Poll until both DMA engines report idle (bounded wait). */
1680 for (i = 2000; i > 0; i--) {
1681 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
/* Global reset of the ASIC, then wait for ResetBusy to clear. */
1686 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1687 ioaddr +ASICCtrl + 2);
1689 for (i = 2000; i > 0; i--) {
1690 if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
/* Debug-only dump of the descriptor rings. */
1696 if (netif_msg_hw(np)) {
1697 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1698 (int)(np->tx_ring_dma));
1699 for (i = 0; i < TX_RING_SIZE; i++)
1700 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1701 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1702 np->tx_ring[i].frag[0].length);
1703 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1704 (int)(np->rx_ring_dma));
1705 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1706 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1707 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1708 np->rx_ring[i].frag[0].length);
1711 #endif /* __i386__ debugging only */
1713 free_irq(dev->irq, dev);
1715 del_timer_sync(&np->timer);
1717 /* Free all the skbuffs in the Rx queue. */
1718 for (i = 0; i < RX_RING_SIZE; i++) {
1719 np->rx_ring[i].status = 0;
1720 skb = np->rx_skbuff[i];
1722 pci_unmap_single(np->pci_dev,
1723 le32_to_cpu(np->rx_ring[i].frag[0].addr),
1724 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1726 np->rx_skbuff[i] = NULL;
1728 np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
/* And every skb still pending in the Tx ring. */
1730 for (i = 0; i < TX_RING_SIZE; i++) {
1731 np->tx_ring[i].next_desc = 0;
1732 skb = np->tx_skbuff[i];
1734 pci_unmap_single(np->pci_dev,
1735 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1736 skb->len, PCI_DMA_TODEVICE);
1738 np->tx_skbuff[i] = NULL;
/* PCI remove callback: unregister the netdev, free the DMA descriptor
 * rings, unmap the register BAR, release PCI regions, and clear drvdata.
 * NOTE(review): the if (dev) guard, ring DMA-handle arguments, free_netdev
 * call, and pci_disable_device are missing from this extract. */
1745 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1747 struct net_device *dev = pci_get_drvdata(pdev);
1750 struct netdev_private *np = netdev_priv(dev);
1752 unregister_netdev(dev);
1753 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1755 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1757 pci_iounmap(pdev, np->base);
1758 pci_release_regions(pdev);
1760 pci_set_drvdata(pdev, NULL);
/* PCI driver descriptor binding the probe/remove hooks to the device
 * id table. NOTE(review): the .name initializer line is missing from
 * this extract. */
1764 static struct pci_driver sundance_driver = {
1766 .id_table = sundance_pci_tbl,
1767 .probe = sundance_probe1,
1768 .remove = __devexit_p(sundance_remove1),
/* Module init: register the PCI driver with the core. */
1771 static int __init sundance_init(void)
1773 /* when a module, this is printed whether or not devices are found in probe */
1777 return pci_register_driver(&sundance_driver);
/* Module exit: unregister the PCI driver, detaching all bound devices. */
1780 static void __exit sundance_exit(void)
1782 pci_unregister_driver(&sundance_driver);
/* Register the module entry/exit points with the kernel module loader. */
1785 module_init(sundance_init);
1786 module_exit(sundance_exit);