1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
/* Driver identification strings, reported in the version banner below. */
#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;		/* 0 (default) => never copy received frames. */
static int flowctrl=1;			/* Advertise 802.3x flow control by default. */
/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
/* Per-card media override strings, parsed in sundance_probe1(). */
static char *media[MAX_UNITS];
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE	64
/* Byte sizes of the descriptor rings, used for the coherent DMA allocations. */
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/pci.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/skbuff.h>
93 #include <linux/init.h>
94 #include <linux/bitops.h>
95 #include <asm/uaccess.h>
96 #include <asm/processor.h> /* Processor type for cache alignment. */
98 #include <linux/delay.h>
99 #include <linux/spinlock.h>
100 #ifndef _COMPAT_WITH_OLD_KERNEL
101 #include <linux/crc32.h>
102 #include <linux/ethtool.h>
103 #include <linux/mii.h>
/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

/* Load-time module parameters; descriptions follow below. */
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
130 I. Board Compatibility
132 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
134 II. Board-specific settings
136 III. Driver operation
140 This driver uses two statically allocated fixed-size descriptor lists
141 formed into rings by a branch from the final descriptor to the beginning of
142 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
143 Some chips explicitly use only 2^N sized rings, while others use a
144 'next descriptor' pointer that the driver forms into rings.
146 IIIb/c. Transmit/Receive Structure
148 This driver uses a zero-copy receive and transmit scheme.
149 The driver allocates full frame size skbuffs for the Rx ring buffers at
150 open() time and passes the skb->data field to the chip as receive data
151 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
152 a fresh skbuff is allocated and the frame is copied to the new skbuff.
153 When the incoming frame is larger, the skbuff is passed directly up the
154 protocol stack. Buffers consumed this way are replaced by newly allocated
155 skbuffs in a later phase of receives.
157 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
158 using a full-sized skbuff for small frames vs. the copying costs of larger
159 frames. New boards are typically used in generously configured machines
160 and the underfilled buffers have negligible impact compared to the benefit of
161 a single allocation size, so the default value of zero results in never
162 copying packets. When copying is done, the cost is usually mitigated by using
163 a combined copy/checksum routine. Copying also preloads the cache, which is
164 most useful with small frames.
A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
172 IIId. Synchronization
174 The driver runs as two independent, single-threaded flows of control. One
175 is the send-packet routine, which enforces single-threaded use by the
176 dev->tbusy flag. The other thread is the interrupt handler, which is single
177 threaded by the hardware and interrupt handling software.
179 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
180 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
181 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
182 the 'lp->tx_full' flag.
184 The interrupt handler has exclusive control over the Rx ring and records stats
185 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
186 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
187 clears both the tx_full and tbusy flags.
193 The Sundance ST201 datasheet, preliminary version.
194 The Kendin KS8723 datasheet, preliminary version.
195 The ICplus IP100 datasheet, preliminary version.
196 http://www.scyld.com/expert/100mbps.html
197 http://www.scyld.com/expert/NWay.html
203 /* Work-around for Kendin chip bugs. */
204 #ifndef CONFIG_SUNDANCE_MMIO
/* PCI vendor/device/subvendor/subdevice IDs claimed by this driver.
   The final field (driver_data) indexes pci_id_tbl[] below. */
static const struct pci_device_id sundance_pci_tbl[] = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },	/* D-Link DFE-550TX */
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },	/* D-Link DFE-550FX */
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },	/* D-Link DFE-580TX */
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },	/* D-Link DFE-530TXS */
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },	/* D-Link DL10050-based */
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },	/* Sundance Alta */
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },	/* IC Plus IP100A */
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
/* Human-readable board names, indexed by the driver_data field of
   sundance_pci_tbl[] above; printed at probe time. */
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
238 /* This driver was written to use PCI memory space, however x86-oriented
239 hardware often uses I/O space accesses. */
241 /* Offsets to the device registers.
242 Unlike software-only systems, device drivers interact with complex hardware.
243 It's not useful to define symbolic names for every register bit in the
244 device. The name can only partially document the semantics and make
245 the driver longer and more difficult to read.
246 In general, only the important configuration values or bits changed
247 multiple times should be defined symbolically.
252 TxDMABurstThresh = 0x08,
253 TxDMAUrgentThresh = 0x09,
254 TxDMAPollPeriod = 0x0a,
259 RxDMABurstThresh = 0x14,
260 RxDMAUrgentThresh = 0x15,
261 RxDMAPollPeriod = 0x16,
280 MulticastFilter0 = 0x60,
281 MulticastFilter1 = 0x64,
288 StatsCarrierError = 0x74,
289 StatsLateColl = 0x75,
290 StatsMultiColl = 0x76,
294 StatsTxXSDefer = 0x7a,
300 /* Aliased and bogus values! */
303 enum ASICCtrl_HiWord_bit {
304 GlobalReset = 0x0001,
309 NetworkReset = 0x0020,
314 /* Bits in the interrupt status/mask registers. */
315 enum intr_status_bits {
316 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
317 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
319 StatsMax=0x0080, LinkChange=0x0100,
320 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
323 /* Bits in the RxMode register. */
325 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
326 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
328 /* Bits in MACCtrl. */
329 enum mac_ctrl0_bits {
330 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
331 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
333 enum mac_ctrl1_bits {
334 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
335 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
336 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
339 /* The Rx and Tx buffer descriptors. */
340 /* Note that using only 32 bit fields simplifies conversion to big-endian
345 struct desc_frag { u32 addr, length; } frag[1];
348 /* Bits in netdev_desc.status */
349 enum desc_status_bits {
351 DescEndPacket=0x4000,
355 DescIntrOnDMADone=0x80000000,
356 DisableAlign = 0x00000001,
#define PRIV_ALIGN	15 	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
/* Per-device private state, stored in the net_device's priv area. */
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];	/* Rx buffers owned by the ring. */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];	/* In-flight Tx packets. */
	dma_addr_t tx_ring_dma;			/* Bus address of tx_ring. */
	dma_addr_t rx_ring_dma;			/* Bus address of rx_ring. */
	struct net_device_stats stats;
	struct timer_list timer;		/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t rx_lock;			/* Group with Tx control cache line. */
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;		/* Advertise 802.3x flow control. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;		/* Autonegotiation enabled. */
	struct tasklet_struct rx_tasklet;	/* Deferred Rx processing (rx_poll). */
	struct tasklet_struct tx_tasklet;	/* Deferred Tx chaining (tx_poll). */
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;		/* Nonzero => send MDIO preamble. */
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	unsigned char pci_rev_id;		/* PCI revision; >= 0x14 enables DFE-580TX fixups. */
403 /* The station address location in the EEPROM. */
404 #define EEPROM_SA_OFFSET 0x10
405 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
406 IntrDrvRqst | IntrTxDone | StatsMax | \
409 static int change_mtu(struct net_device *dev, int new_mtu);
410 static int eeprom_read(void __iomem *ioaddr, int location);
411 static int mdio_read(struct net_device *dev, int phy_id, int location);
412 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
413 static int netdev_open(struct net_device *dev);
414 static void check_duplex(struct net_device *dev);
415 static void netdev_timer(unsigned long data);
416 static void tx_timeout(struct net_device *dev);
417 static void init_ring(struct net_device *dev);
418 static int start_tx(struct sk_buff *skb, struct net_device *dev);
419 static int reset_tx (struct net_device *dev);
420 static irqreturn_t intr_handler(int irq, void *dev_instance);
421 static void rx_poll(unsigned long data);
422 static void tx_poll(unsigned long data);
423 static void refill_rx (struct net_device *dev);
424 static void netdev_error(struct net_device *dev, int intr_status);
425 static void netdev_error(struct net_device *dev, int intr_status);
426 static void set_rx_mode(struct net_device *dev);
427 static int __set_mac_addr(struct net_device *dev);
428 static struct net_device_stats *get_stats(struct net_device *dev);
429 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
430 static int netdev_close(struct net_device *dev);
431 static const struct ethtool_ops ethtool_ops;
/* Issue a reset command to the ASICCtrl register and spin until the
   chip clears its busy bit, warning if the reset never completes. */
static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	/* ST201 documentation states ASICCtrl is a 32bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
/* Probe one PCI device: enable it, map its registers, read the station
   address from EEPROM, allocate the descriptor rings, locate the MII
   PHY, apply any media[] override, and register the net_device. */
static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
	struct net_device *dev;
	struct netdev_private *np;
	int chip_idx = ent->driver_data;	/* Index into pci_id_tbl[]. */
	void __iomem *ioaddr;
	int phy, phy_idx = 0;
/* when built into the kernel, we only print version if device is found */
	static int printed_version;
	if (!printed_version++)
	if (pci_enable_device(pdev))
	pci_set_master(pdev);
	dev = alloc_etherdev(sizeof(*np));
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	if (pci_request_regions(pdev, DRV_NAME))
	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	/* Station address lives at EEPROM_SA_OFFSET, three 16-bit words. */
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] =
			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	dev->base_addr = (unsigned long)ioaddr;
	np = netdev_priv(dev);
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
	/* Coherent DMA allocations for the Tx then Rx descriptor rings. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;
	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;
	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata(pdev, dev);
	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
	i = register_netdev(dev);
		goto err_out_unmap_rx;
	printk(KERN_INFO "%s: %s at %p, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
			printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;
	 * It seems some phys doesn't deal well with address 0 being accessed
	 * first, so leave address zero to the end of the loop (32 & 31).
	for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;		/* Wraps 32 -> 0 so PHY 0 is probed last. */
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
	np->mii_preamble_required--;
	printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
		   dev->name, ioread32(ioaddr + ASICCtrl));
	goto err_out_unregister;
	np->mii_if.phy_id = np->phys[0];
	/* Parse override configuration */
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
				   || strcmp (media[card_idx], "3") == 0) {
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->mii_if.full_duplex = 0;
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
			np->mii_if.full_duplex = 1;
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	/* If flow control enabled, we need to advertise it.*/
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
	/* Error unwind: release resources in reverse order of acquisition. */
	unregister_netdev(dev);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
	pci_release_regions(pdev);
/* Validate and apply a new MTU.  The lower bound 68 is the minimum IP
   MTU; 8191 is the hardware limit set by the RxDMAFrameLen register.
   Changing the MTU while the interface is running is refused. */
static int change_mtu(struct net_device *dev, int new_mtu)
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
	if (netif_running(dev))
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
/* Start an EEPROM read of 'location' and poll the busy bit (0x8000)
   until the data word is available in EEData. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
	} while (--boguscnt > 0);
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
/* Bit patterns driven onto the MIICtrl register for one MDIO bit time. */
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);	/* Clock the bit out. */
/* Bit-bang a 16-bit read of MII register 'location' on PHY 'phy_id'
   via the MIICtrl register.  Returns the 16-bit register value. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;	/* Read frame header. */
	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);
	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
		iowrite8(dataval, mdio_addr);
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
	return (retval>>1) & 0xffff;	/* Drop the idle bit, keep the 16 data bits. */
/* Bit-bang a 16-bit write of 'value' to MII register 'location' on
   PHY 'phy_id'.  The 32-bit frame holds header, address and data. */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);
	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
		iowrite8(dataval, mdio_addr);
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
/* Bring the interface up: claim the IRQ, program the Rx ring address,
   configure frame size and DMA polling, enable the MAC and interrupts,
   and start the media-monitoring timer. */
static int netdev_open(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	/* Do we need to reset the chip??? */
	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);
	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */
	/* Initialize other registers. */
	/* Allow 4 extra bytes for an 802.1Q VLAN tag when VLAN is configured. */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
	iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
	/* Configure the PCI bus bursts and FIFO thresholds. */
	if (dev->if_port == 0)
		dev->if_port = np->default_port;
	spin_lock_init(&np->mcastlock);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_rev_id >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);
	spin_lock_irqsave(&np->lock, flags);
	spin_unlock_irqrestore(&np->lock, flags);
	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);
	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
861 static void check_duplex(struct net_device *dev)
863 struct netdev_private *np = netdev_priv(dev);
864 void __iomem *ioaddr = np->base;
865 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
866 int negotiated = mii_lpa & np->mii_if.advertising;
870 if (!np->an_enable || mii_lpa == 0xffff) {
871 if (np->mii_if.full_duplex)
872 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
877 /* Autonegotiation */
878 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
879 if (np->mii_if.full_duplex != duplex) {
880 np->mii_if.full_duplex = duplex;
881 if (netif_msg_link(np))
882 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
883 "negotiated capability %4.4x.\n", dev->name,
884 duplex ? "full" : "half", np->phys[0], negotiated);
885 iowrite16(ioread16(ioaddr + MACCtrl0) | duplex ? 0x20 : 0, ioaddr + MACCtrl0);
/* Periodic media-monitoring timer callback; logs chip status when
   timer debugging is enabled, then re-arms itself 10 seconds out. */
static void netdev_timer(unsigned long data)
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;		/* Re-poll interval. */
	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
/* Watchdog handler for a hung transmitter: quiesce the queue, the Tx
   tasklet and interrupts, dump the Tx ring state for diagnosis, reset
   the Tx path under the lock, then re-enable everything. */
static void tx_timeout(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);	/* Mask all interrupts during recovery. */
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));
		/* Dump every Tx descriptor to aid post-mortem debugging. */
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	spin_lock_irqsave(&np->lock, flag);
	/* Stop and restart the chip's Tx processes . */
	spin_unlock_irqrestore(&np->lock, flag);
	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	/* Wake the queue only if there is room in the Tx ring. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Resets all ring indices, links the Rx descriptors into a circular
   list, allocates and DMA-maps one skbuff per Rx slot, and clears the
   Tx ring. */
static void init_ring(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	/* Standard buffer for MTU <= 1520; otherwise size to the MTU plus slack. */
	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
/* Tx tasklet: chain any descriptors queued since the last run onto the
   hardware Tx list, mark the newest one to interrupt on completion, and
   kick TxListPtr if the DMA engine is idle. */
static void tx_poll (unsigned long data)
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
			entry*sizeof(struct netdev_desc));
		np->last_tx = txdesc;
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);
	/* TxListPtr == 0 means the Tx DMA engine is idle; restart it. */
	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
/* hard_start_xmit: fill the next Tx descriptor for 'skb', schedule the
   tx_poll() tasklet to hand it to the hardware, and stop the queue when
   the ring is nearly full. */
start_tx (struct sk_buff *skb, struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];
	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
	/* Increment cur_tx before tasklet_schedule() */
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);
	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
			&& !netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
/* Reset hardware tx and free all of tx buffers */
/* Disables the transmitter, resets the Tx DMA/FIFO logic, unmaps and
   frees every queued skb (counting them as dropped), zeroes the ring
   indices, and re-enables the MAC. */
reset_tx (struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int irq = in_interrupt();	/* Selects dev_kfree_skb_irq vs dev_kfree_skb. */
	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				dev_kfree_skb_irq (skb);
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
	np->cur_tx = np->dirty_tx = 0;
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
/* The interrupt handler cleans up after the Tx thread,
   and schedule a Rx thread work */
static irqreturn_t intr_handler(int irq, void *dev_instance)
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int intr_status = ioread16(ioaddr + IntrStatus);
	/* writing the status word back acknowledges the asserted bits */
	iowrite16(intr_status, ioaddr + IntrStatus);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
			   dev->name, intr_status);
	/* not one of our interrupt sources */
	if (!(intr_status & DEFAULT_INTR))
	/* Rx completion: mask further Rx interrupts and defer to the rx tasklet */
	if (intr_status & (IntrRxDMADone)) {
		iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
			ioaddr + IntrEnable);
		np->budget = RX_BUDGET;
		tasklet_schedule(&np->rx_tasklet);
	if (intr_status & (IntrTxDone | IntrDrvRqst)) {
		tx_status = ioread16 (ioaddr + TxStatus);
		/* bit 7 of TxStatus = frame complete; drain at most 32 reports */
		for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
			if (netif_msg_tx_done(np))
				("%s: Transmit status is %2.2x.\n",
			   dev->name, tx_status);
			/* bits 0x1e flag the individual Tx error conditions */
			if (tx_status & 0x1e) {
				if (netif_msg_tx_err(np))
					printk("%s: Transmit error status %4.4x.\n",
						   dev->name, tx_status);
				np->stats.tx_errors++;
				if (tx_status & 0x10)
					np->stats.tx_fifo_errors++;
				if (tx_status & 0x08)
					np->stats.collisions++;
				if (tx_status & 0x04)
					np->stats.tx_fifo_errors++;
				if (tx_status & 0x02)
					np->stats.tx_window_errors++;
				** This reset has been verified on
				** DFE-580TX boards ! phdm@macqel.be.
				if (tx_status & 0x10) {	/* TxUnderrun */
					/* Restart Tx FIFO and transmitter */
					sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
					/* No need to reset the Tx pointer here */
				/* Restart the Tx. Need to make sure tx enabled */
				iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
				if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
			/* Yup, this is a documentation bug. It cost me *hours*. */
			iowrite16 (0, ioaddr + TxStatus);
			iowrite32(5000, ioaddr + DownCounter);
			tx_status = ioread16 (ioaddr + TxStatus);
		hw_frame_id = (tx_status >> 8) & 0xff;
			hw_frame_id = ioread8(ioaddr + TxFrameId);
	/* rev >= 0x14 silicon reports a frame id we can match against the ring */
	if (np->pci_rev_id >= 0x14) {
		spin_lock(&np->lock);
		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			struct sk_buff *skb;
			/* frame id was encoded into descriptor status bits 2..9 at queue time */
			sw_frame_id = (le32_to_cpu(
				np->tx_ring[entry].status) >> 2) & 0xff;
			if (sw_frame_id == hw_frame_id &&
				!(le32_to_cpu(np->tx_ring[entry].status)
			if (sw_frame_id == (hw_frame_id + 1) %
			skb = np->tx_skbuff[entry];
			/* Free the original skb. */
			pci_unmap_single(np->pci_dev,
				np->tx_ring[entry].frag[0].addr,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq (np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = NULL;
			np->tx_ring[entry].frag[0].addr = 0;
			np->tx_ring[entry].frag[0].length = 0;
		spin_unlock(&np->lock);
		/* older silicon: rely on the descriptor completion bit alone */
		spin_lock(&np->lock);
		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			struct sk_buff *skb;
			if (!(le32_to_cpu(np->tx_ring[entry].status)
			skb = np->tx_skbuff[entry];
			/* Free the original skb. */
			pci_unmap_single(np->pci_dev,
				np->tx_ring[entry].frag[0].addr,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq (np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = NULL;
			np->tx_ring[entry].frag[0].addr = 0;
			np->tx_ring[entry].frag[0].length = 0;
		spin_unlock(&np->lock);
	/* reclaimed enough ring entries: let the stack queue packets again */
	if (netif_queue_stopped(dev) &&
		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, clear busy flag. */
		netif_wake_queue (dev);
	/* Abnormal error summary/uncommon events handlers. */
	if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
		netdev_error(dev, intr_status);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
/* Rx tasklet: walk completed descriptors, deliver packets to the stack,
 * then either re-enable Rx interrupts or reschedule ourselves. */
static void rx_poll(unsigned long data)
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	/* If EOP is set on the next entry, it's a new packet. Send it up. */
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		/* budget exhausted: stop and reschedule rather than hog the CPU */
		if (--boguscnt < 0) {
		/* DescOwn clear means the chip has not filled this descriptor yet */
		if (!(frame_status & DescOwn))
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
		/* any of these status bits marks a receive error */
		if (frame_status & 0x001f4000) {
			/* There was a error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   dev->name, frame_status);
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* copy path: sync for CPU, copy, hand buffer back to device */
				pci_dma_sync_single_for_cpu(np->pci_dev,
					PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
				pci_dma_sync_single_for_device(np->pci_dev,
					PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
				/* pass-up path: unmap and surrender the ring skb itself */
				pci_unmap_single(np->pci_dev,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			dev->last_rx = jiffies;
		entry = (entry + 1) % RX_RING_SIZE;
	/* done within budget: restore the full interrupt mask set by intr_handler */
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	/* budget overrun: top the budget back up and run again */
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
/* Re-populate emptied Rx ring slots with freshly allocated, DMA-mapped
 * skbuffs; stops early (without error) if allocation fails. */
static void refill_rx (struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		/* status = 0 clears DescOwn, i.e. returns the slot to the chip
		 * (rx_poll treats DescOwn set as "filled by hardware") */
		np->rx_ring[entry].status = 0;
/* Handle the uncommon interrupt causes: link changes (log the resolved
 * speed/duplex, enable flow control), stats overflow, and PCI errors. */
static void netdev_error(struct net_device *dev, int intr_status)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	if (intr_status & LinkChange) {
		if (np->an_enable) {
			/* autoneg: resolved mode = our advertisement AND the partner's */
			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
			mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk (KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				printk ("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				printk ("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				printk ("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				printk ("10Mbps, half duplex\n");
			/* forced mode: read speed/duplex straight from BMCR */
			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			printk (KERN_INFO "%s: Link changed: %dMbps ,",
			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
		/* full duplex + flowctrl requested: enable pause in filter and MAC */
		if (np->flowctrl && np->mii_if.full_duplex) {
			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
	if (intr_status & StatsMax) {
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
/* Accumulate the chip's hardware counters into np->stats and return it.
 * NOTE(review): the discarded reads below presumably clear the on-chip
 * counters -- confirm against the ST201 datasheet. */
static struct net_device_stats *get_stats(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	/* The chip only need report frame silently dropped. */
	np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	/* all three collision counters fold into the single collisions field */
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
	ioread8(ioaddr + StatsTxDefer);
	/* read-and-discard the counters we do not accumulate */
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	/* byte counters are split across low/high 16-bit registers */
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
/* Program the Rx filter mode and the 64-bit multicast hash table from
 * dev->flags and the multicast list: promiscuous, all-multicast, hashed
 * multicast, or unicast+broadcast only. */
static void set_rx_mode(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		memset (mc_filter, 0, sizeof (mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			/* hash index = top 6 bits of the little-endian Ethernet CRC */
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
		/* no multicast entries at all: unicast + broadcast only */
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
	/* pause-frame address bit, mirrored from the flow-control setup */
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;
	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
/* Load dev->dev_addr into the chip's StationAddr registers, one
 * little-endian 16-bit word at a time. */
static int __set_mac_addr(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
1525 static int check_if_running(struct net_device *dev)
1527 if (!netif_running(dev))
1532 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1534 struct netdev_private *np = netdev_priv(dev);
1535 strcpy(info->driver, DRV_NAME);
1536 strcpy(info->version, DRV_VERSION);
1537 strcpy(info->bus_info, pci_name(np->pci_dev));
1540 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1542 struct netdev_private *np = netdev_priv(dev);
1543 spin_lock_irq(&np->lock);
1544 mii_ethtool_gset(&np->mii_if, ecmd);
1545 spin_unlock_irq(&np->lock);
1549 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1551 struct netdev_private *np = netdev_priv(dev);
1553 spin_lock_irq(&np->lock);
1554 res = mii_ethtool_sset(&np->mii_if, ecmd);
1555 spin_unlock_irq(&np->lock);
1559 static int nway_reset(struct net_device *dev)
1561 struct netdev_private *np = netdev_priv(dev);
1562 return mii_nway_restart(&np->mii_if);
1565 static u32 get_link(struct net_device *dev)
1567 struct netdev_private *np = netdev_priv(dev);
1568 return mii_link_ok(&np->mii_if);
1571 static u32 get_msglevel(struct net_device *dev)
1573 struct netdev_private *np = netdev_priv(dev);
1574 return np->msg_enable;
1577 static void set_msglevel(struct net_device *dev, u32 val)
1579 struct netdev_private *np = netdev_priv(dev);
1580 np->msg_enable = val;
/* ethtool operations table; .begin gates every operation on the interface
 * being up (check_if_running). */
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_perm_addr = ethtool_op_get_perm_addr,
/* ioctl entry point: standard MII ioctls go through generic_mii_ioctl()
 * under the device lock; SIOCDEVPRIVATE dumps Tx ring / queue state for
 * debugging. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	if (!netif_running(dev))
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
	case SIOCDEVPRIVATE:
	/* debug dump of every Tx descriptor: DMA address, link, status,
	 * frame id (status >> 2), fragment address and length */
	for (i=0; i<TX_RING_SIZE; i++) {
		printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
			(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
			le32_to_cpu(np->tx_ring[i].next_desc),
			le32_to_cpu(np->tx_ring[i].status),
			(le32_to_cpu(np->tx_ring[i].status) >> 2)
			le32_to_cpu(np->tx_ring[i].frag[0].addr),
			le32_to_cpu(np->tx_ring[i].frag[0].length));
	printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
		ioread32(np->base + TxListPtr),
		netif_queue_stopped(dev));
	printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
		np->cur_tx, np->cur_tx % TX_RING_SIZE,
		np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
	printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
	printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
/* Bring the interface down: kill tasklets, stop the queue, mask interrupts,
 * halt DMA and the MAC, hard-reset the chip, then release the IRQ, timer
 * and every Rx/Tx buffer. */
static int netdev_close(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	netif_stop_queue(dev);
	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);
	/* Disable Rx and Tx DMA for safely release resource */
	iowrite32(0x500, ioaddr + DMACtrl);
	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
	/* bounded wait for the Tx/Rx DMA engines to go idle */
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
	/* global hard reset via the high half of ASICCtrl */
	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr +ASICCtrl + 2);
	/* bounded wait for the reset to complete */
	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
	/* optional debug dump of both rings before teardown */
	if (netif_msg_hw(np)) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
#endif /* __i386__ debugging only */
	free_irq(dev->irq, dev);
	del_timer_sync(&np->timer);
	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
		skb = np->rx_skbuff[i];
			pci_unmap_single(np->pci_dev,
				np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE);
			np->rx_skbuff[i] = NULL;
	/* and everything still held in the Tx ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
			np->tx_skbuff[i] = NULL;
/* PCI remove: unregister the netdev, then free the DMA rings, the MMIO
 * mapping, the PCI regions and the netdev itself. */
static void __devexit sundance_remove1 (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		/* rings were allocated with pci_alloc_consistent at probe time */
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
/* PCI driver glue: device-id table plus probe/remove entry points. */
static struct pci_driver sundance_driver = {
	.id_table = sundance_pci_tbl,
	.probe = sundance_probe1,
	.remove = __devexit_p(sundance_remove1),
/* Module load entry point: register the driver with the PCI core. */
static int __init sundance_init(void)
/* when a module, this is printed whether or not devices are found in probe */
	return pci_register_driver(&sundance_driver);
/* Module unload entry point: detach the driver from the PCI core. */
static void __exit sundance_exit(void)
	pci_unregister_driver(&sundance_driver);
/* Register the module's load/unload hooks with the kernel. */
module_init(sundance_init);
module_exit(sundance_exit);