1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
30 /* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
35 static const int multicast_filter_limit = 32;
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
57 static char *media[MAX_UNITS];
60 /* Operational parameters that are set at compile time. */
62 /* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
72 #define TX_TOTAL_SIZE (TX_RING_SIZE*sizeof(struct netdev_desc))
73 #define RX_TOTAL_SIZE (RX_RING_SIZE*sizeof(struct netdev_desc))
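/* Editorial aside, not part of the original source: because the ring
 * sizes above are powers of two, the usual wrap-around expression
 *
 *	entry = (entry + 1) % TX_RING_SIZE;
 *
 * compiles down to (entry + 1) & (TX_RING_SIZE - 1), which is the
 * "compile efficiency" the comment above refers to. */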
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/pci.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/skbuff.h>
93 #include <linux/init.h>
94 #include <linux/bitops.h>
95 #include <asm/uaccess.h>
96 #include <asm/processor.h> /* Processor type for cache alignment. */
98 #include <linux/delay.h>
99 #include <linux/spinlock.h>
100 #ifndef _COMPAT_WITH_OLD_KERNEL
101 #include <linux/crc32.h>
102 #include <linux/ethtool.h>
103 #include <linux/mii.h>
111 /* These identify the driver base version and may not be removed. */
112 static char version[] =
113 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
114 KERN_INFO " http://www.scyld.com/network/sundance.html\n";
116 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
118 MODULE_LICENSE("GPL");
120 module_param(debug, int, 0);
121 module_param(rx_copybreak, int, 0);
122 module_param_array(media, charp, NULL, 0);
123 module_param(flowctrl, int, 0);
124 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
125 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
126 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
131 I. Board Compatibility
133 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
135 II. Board-specific settings
137 III. Driver operation
141 This driver uses two statically allocated fixed-size descriptor lists
142 formed into rings by a branch from the final descriptor to the beginning of
143 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
144 Some chips explicitly use only 2^N sized rings, while others use a
145 'next descriptor' pointer that the driver forms into rings.
147 IIIb/c. Transmit/Receive Structure
149 This driver uses a zero-copy receive and transmit scheme.
150 The driver allocates full frame size skbuffs for the Rx ring buffers at
151 open() time and passes the skb->data field to the chip as receive data
152 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
153 a fresh skbuff is allocated and the frame is copied to the new skbuff.
154 When the incoming frame is larger, the skbuff is passed directly up the
155 protocol stack. Buffers consumed this way are replaced by newly allocated
156 skbuffs in a later phase of receives.
158 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
159 using a full-sized skbuff for small frames vs. the copying costs of larger
160 frames. New boards are typically used in generously configured machines
161 and the underfilled buffers have negligible impact compared to the benefit of
162 a single allocation size, so the default value of zero results in never
163 copying packets. When copying is done, the cost is usually mitigated by using
164 a combined copy/checksum routine. Copying also preloads the cache, which is
165 most useful with small frames.
167 A subtle aspect of the operation is that the IP header at offset 14 in an
168 ethernet frame isn't longword aligned for further processing.
169 Unaligned buffers are permitted by the Sundance hardware, so
170 frames are received into the skbuff at an offset of "+2", 16-byte aligning
171 the IP header.
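Illustrative sketch (an editorial addition, not from the original text):
the copy-break decision described above, written with the same kernel
helpers and names that rx_poll() below uses; "entry" is the current Rx
ring index:

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);		-- the "+2" offset noted above
		eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
		skb_put(skb, pkt_len);		-- small frame: copy, recycle the buffer
	} else {
		skb_put(skb = np->rx_skbuff[entry], pkt_len);
		np->rx_skbuff[entry] = NULL;	-- large frame: hand the buffer up
	}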
173 IIId. Synchronization
175 The driver runs as two independent, single-threaded flows of control. One
176 is the send-packet routine, which enforces single-threaded use by the
177 dev->tbusy flag. The other thread is the interrupt handler, which is single
178 threaded by the hardware and interrupt handling software.
180 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
181 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
182 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
183 the 'lp->tx_full' flag.
185 The interrupt handler has exclusive control over the Rx ring and records stats
186 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
187 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
188 clears both the tx_full and tbusy flags.
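Illustrative sketch (an editorial addition, not from the original text):
the same producer/consumer test as the code below actually expresses it,
with netif_stop_queue()/netif_wake_queue() in place of dev->tbusy:

	in start_tx():
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
			netif_stop_queue(dev);
	in intr_handler(), after reaping completed descriptors:
		if (netif_queue_stopped(dev) &&
		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
			netif_wake_queue(dev);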
194 The Sundance ST201 datasheet, preliminary version.
195 The Kendin KS8723 datasheet, preliminary version.
196 The ICplus IP100 datasheet, preliminary version.
197 http://www.scyld.com/expert/100mbps.html
198 http://www.scyld.com/expert/NWay.html
204 /* Work-around for Kendin chip bugs. */
205 #ifndef CONFIG_SUNDANCE_MMIO
209 static const struct pci_device_id sundance_pci_tbl[] = {
210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
213 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
214 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
215 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
216 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
219 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
228 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
229 {"D-Link DFE-550TX FAST Ethernet Adapter"},
230 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
231 {"D-Link DFE-580TX 4 port Server Adapter"},
232 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
233 {"D-Link DL10050-based FAST Ethernet Adapter"},
234 {"Sundance Technology Alta"},
235 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
236 { } /* terminate list. */
239 /* This driver was written to use PCI memory space; however, x86-oriented
240 hardware often uses I/O space accesses. */
242 /* Offsets to the device registers.
243 Unlike software-only systems, device drivers interact with complex hardware.
244 It's not useful to define symbolic names for every register bit in the
245 device. Such names can only partially document the semantics and make
246 the driver longer and more difficult to read.
247 In general, only the important configuration values or bits changed
248 multiple times should be defined symbolically.
253 TxDMABurstThresh = 0x08,
254 TxDMAUrgentThresh = 0x09,
255 TxDMAPollPeriod = 0x0a,
260 RxDMABurstThresh = 0x14,
261 RxDMAUrgentThresh = 0x15,
262 RxDMAPollPeriod = 0x16,
281 MulticastFilter0 = 0x60,
282 MulticastFilter1 = 0x64,
289 StatsCarrierError = 0x74,
290 StatsLateColl = 0x75,
291 StatsMultiColl = 0x76,
295 StatsTxXSDefer = 0x7a,
301 /* Aliased and bogus values! */
304 enum ASICCtrl_HiWord_bit {
305 GlobalReset = 0x0001,
310 NetworkReset = 0x0020,
315 /* Bits in the interrupt status/mask registers. */
316 enum intr_status_bits {
317 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
318 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
320 StatsMax=0x0080, LinkChange=0x0100,
321 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
324 /* Bits in the RxMode register. */
326 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
327 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
329 /* Bits in MACCtrl. */
330 enum mac_ctrl0_bits {
331 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
332 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
334 enum mac_ctrl1_bits {
335 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
336 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
337 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
340 /* The Rx and Tx buffer descriptors. */
341 /* Note that using only 32 bit fields simplifies conversion to big-endian
342 architectures. */
346 struct desc_frag { u32 addr, length; } frag[1];
349 /* Bits in netdev_desc.status */
350 enum desc_status_bits {
352 DescEndPacket=0x4000,
356 DescIntrOnDMADone=0x80000000,
357 DisableAlign = 0x00000001,
360 #define PRIV_ALIGN 15 /* Required alignment mask */
361 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
362 within the structure. */
364 struct netdev_private {
365 /* Descriptor rings first for alignment. */
366 struct netdev_desc *rx_ring;
367 struct netdev_desc *tx_ring;
368 struct sk_buff* rx_skbuff[RX_RING_SIZE];
369 struct sk_buff* tx_skbuff[TX_RING_SIZE];
370 dma_addr_t tx_ring_dma;
371 dma_addr_t rx_ring_dma;
372 struct net_device_stats stats;
373 struct timer_list timer; /* Media monitoring timer. */
374 /* Frequently used values: keep some adjacent for cache effect. */
376 spinlock_t rx_lock; /* Group with Tx control cache line. */
379 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
380 unsigned int rx_buf_sz; /* Based on MTU+slack. */
381 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
382 unsigned int cur_tx, dirty_tx;
383 /* These values keep track of the transceiver/media in use. */
384 unsigned int flowctrl:1;
385 unsigned int default_port:4; /* Last dev->if_port value. */
386 unsigned int an_enable:1;
388 struct tasklet_struct rx_tasklet;
389 struct tasklet_struct tx_tasklet;
392 /* Multicast and receive mode. */
393 spinlock_t mcastlock; /* SMP lock multicast updates. */
395 /* MII transceiver section. */
396 struct mii_if_info mii_if;
397 int mii_preamble_required;
398 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
399 struct pci_dev *pci_dev;
401 unsigned char pci_rev_id;
404 /* The station address location in the EEPROM. */
405 #define EEPROM_SA_OFFSET 0x10
406 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
407 IntrDrvRqst | IntrTxDone | StatsMax | \
408 LinkChange)
410 static int change_mtu(struct net_device *dev, int new_mtu);
411 static int eeprom_read(void __iomem *ioaddr, int location);
412 static int mdio_read(struct net_device *dev, int phy_id, int location);
413 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
414 static int netdev_open(struct net_device *dev);
415 static void check_duplex(struct net_device *dev);
416 static void netdev_timer(unsigned long data);
417 static void tx_timeout(struct net_device *dev);
418 static void init_ring(struct net_device *dev);
419 static int start_tx(struct sk_buff *skb, struct net_device *dev);
420 static int reset_tx (struct net_device *dev);
421 static irqreturn_t intr_handler(int irq, void *dev_instance);
422 static void rx_poll(unsigned long data);
423 static void tx_poll(unsigned long data);
424 static void refill_rx (struct net_device *dev);
425 static void netdev_error(struct net_device *dev, int intr_status);
427 static void set_rx_mode(struct net_device *dev);
428 static int __set_mac_addr(struct net_device *dev);
429 static struct net_device_stats *get_stats(struct net_device *dev);
430 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
431 static int netdev_close(struct net_device *dev);
432 static const struct ethtool_ops ethtool_ops;
434 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
436 struct netdev_private *np = netdev_priv(dev);
437 void __iomem *ioaddr = np->base + ASICCtrl;
440 /* ST201 documentation states ASICCtrl is a 32-bit register */
441 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
442 /* ST201 documentation states reset can take up to 1 ms */
444 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
445 if (--countdown == 0) {
446 printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
453 static int __devinit sundance_probe1 (struct pci_dev *pdev,
454 const struct pci_device_id *ent)
456 struct net_device *dev;
457 struct netdev_private *np;
459 int chip_idx = ent->driver_data;
462 void __iomem *ioaddr;
471 int phy, phy_idx = 0;
474 /* when built into the kernel, we only print version if device is found */
476 static int printed_version;
477 if (!printed_version++)
481 if (pci_enable_device(pdev))
483 pci_set_master(pdev);
487 dev = alloc_etherdev(sizeof(*np));
490 SET_MODULE_OWNER(dev);
491 SET_NETDEV_DEV(dev, &pdev->dev);
493 if (pci_request_regions(pdev, DRV_NAME))
496 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
500 for (i = 0; i < 3; i++)
501 ((u16 *)dev->dev_addr)[i] =
502 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
503 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
505 dev->base_addr = (unsigned long)ioaddr;
508 np = netdev_priv(dev);
511 np->chip_id = chip_idx;
512 np->msg_enable = (1 << debug) - 1;
513 spin_lock_init(&np->lock);
514 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
515 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
517 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
519 goto err_out_cleardev;
520 np->tx_ring = (struct netdev_desc *)ring_space;
521 np->tx_ring_dma = ring_dma;
523 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
525 goto err_out_unmap_tx;
526 np->rx_ring = (struct netdev_desc *)ring_space;
527 np->rx_ring_dma = ring_dma;
529 np->mii_if.dev = dev;
530 np->mii_if.mdio_read = mdio_read;
531 np->mii_if.mdio_write = mdio_write;
532 np->mii_if.phy_id_mask = 0x1f;
533 np->mii_if.reg_num_mask = 0x1f;
535 /* The chip-specific entries in the device structure. */
536 dev->open = &netdev_open;
537 dev->hard_start_xmit = &start_tx;
538 dev->stop = &netdev_close;
539 dev->get_stats = &get_stats;
540 dev->set_multicast_list = &set_rx_mode;
541 dev->do_ioctl = &netdev_ioctl;
542 SET_ETHTOOL_OPS(dev, &ethtool_ops);
543 dev->tx_timeout = &tx_timeout;
544 dev->watchdog_timeo = TX_TIMEOUT;
545 dev->change_mtu = &change_mtu;
546 pci_set_drvdata(pdev, dev);
548 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
550 i = register_netdev(dev);
552 goto err_out_unmap_rx;
554 printk(KERN_INFO "%s: %s at %p, ",
555 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
556 for (i = 0; i < 5; i++)
557 printk("%2.2x:", dev->dev_addr[i]);
558 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
560 np->phys[0] = 1; /* Default setting */
561 np->mii_preamble_required++;
563 * It seems some PHYs don't deal well with address 0 being accessed
564 * first, so leave address zero to the end of the loop (32 & 31).
566 for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
567 int phyx = phy & 0x1f;
568 int mii_status = mdio_read(dev, phyx, MII_BMSR);
569 if (mii_status != 0xffff && mii_status != 0x0000) {
570 np->phys[phy_idx++] = phyx;
571 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
572 if ((mii_status & 0x0040) == 0)
573 np->mii_preamble_required++;
574 printk(KERN_INFO "%s: MII PHY found at address %d, status "
575 "0x%4.4x advertising %4.4x.\n",
576 dev->name, phyx, mii_status, np->mii_if.advertising);
579 np->mii_preamble_required--;
582 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
583 dev->name, ioread32(ioaddr + ASICCtrl));
584 goto err_out_unregister;
587 np->mii_if.phy_id = np->phys[0];
589 /* Parse override configuration */
591 if (card_idx < MAX_UNITS) {
592 if (media[card_idx] != NULL) {
594 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
595 strcmp (media[card_idx], "4") == 0) {
597 np->mii_if.full_duplex = 1;
598 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
599 || strcmp (media[card_idx], "3") == 0) {
601 np->mii_if.full_duplex = 0;
602 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
603 strcmp (media[card_idx], "2") == 0) {
605 np->mii_if.full_duplex = 1;
606 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
607 strcmp (media[card_idx], "1") == 0) {
609 np->mii_if.full_duplex = 0;
619 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
620 /* Default 100Mbps Full */
623 np->mii_if.full_duplex = 1;
628 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
630 /* If flow control enabled, we need to advertise it.*/
632 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
633 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
634 /* Force media type */
635 if (!np->an_enable) {
637 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
638 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
639 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
640 printk (KERN_INFO "Override speed=%d, %s duplex\n",
641 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
645 /* Perhaps move the reset here? */
646 /* Reset the chip to erase previous misconfiguration. */
647 if (netif_msg_hw(np))
648 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
649 sundance_reset(dev, 0x00ff << 16);
650 if (netif_msg_hw(np))
651 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
657 unregister_netdev(dev);
659 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
661 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
663 pci_set_drvdata(pdev, NULL);
664 pci_iounmap(pdev, ioaddr);
666 pci_release_regions(pdev);
672 static int change_mtu(struct net_device *dev, int new_mtu)
674 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
676 if (netif_running(dev))
682 #define eeprom_delay(ee_addr) ioread32(ee_addr)
683 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
684 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
686 int boguscnt = 10000; /* Typical 1900 ticks. */
687 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
689 eeprom_delay(ioaddr + EECtrl);
690 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
691 return ioread16(ioaddr + EEData);
693 } while (--boguscnt > 0);
697 /* MII transceiver control section.
698 Read and write the MII registers using software-generated serial
699 MDIO protocol. See the MII specifications or DP83840A data sheet
700 for details.
702 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
703 met by back-to-back 33 MHz PCI cycles. */
704 #define mdio_delay() ioread8(mdio_addr)
707 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
709 #define MDIO_EnbIn (0)
710 #define MDIO_WRITE0 (MDIO_EnbOutput)
711 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
713 /* Generate the preamble required for initial synchronization and
714 a few older transceivers. */
715 static void mdio_sync(void __iomem *mdio_addr)
719 /* Establish sync by sending at least 32 logic ones. */
720 while (--bits >= 0) {
721 iowrite8(MDIO_WRITE1, mdio_addr);
723 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
728 static int mdio_read(struct net_device *dev, int phy_id, int location)
730 struct netdev_private *np = netdev_priv(dev);
731 void __iomem *mdio_addr = np->base + MIICtrl;
732 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
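/* The 16 bits of mii_cmd shifted out below form an IEEE 802.3 clause-22
read frame: two extra preamble ones, start (01), read opcode (10), the
5-bit PHY address and the 5-bit register number. */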
735 if (np->mii_preamble_required)
736 mdio_sync(mdio_addr);
738 /* Shift the read command bits out. */
739 for (i = 15; i >= 0; i--) {
740 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
742 iowrite8(dataval, mdio_addr);
744 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
747 /* Read the two transition, 16 data, and wire-idle bits. */
748 for (i = 19; i > 0; i--) {
749 iowrite8(MDIO_EnbIn, mdio_addr);
751 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
752 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
755 return (retval>>1) & 0xffff;
758 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
760 struct netdev_private *np = netdev_priv(dev);
761 void __iomem *mdio_addr = np->base + MIICtrl;
762 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
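/* mii_cmd is a complete clause-22 write frame: start (01), write opcode
(01), 5-bit PHY address, 5-bit register, turnaround (10), then the
16-bit value, shifted out MSB first. */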
765 if (np->mii_preamble_required)
766 mdio_sync(mdio_addr);
768 /* Shift the command bits out. */
769 for (i = 31; i >= 0; i--) {
770 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
772 iowrite8(dataval, mdio_addr);
774 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
777 /* Clear out extra bits. */
778 for (i = 2; i > 0; i--) {
779 iowrite8(MDIO_EnbIn, mdio_addr);
781 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
787 static int netdev_open(struct net_device *dev)
789 struct netdev_private *np = netdev_priv(dev);
790 void __iomem *ioaddr = np->base;
794 /* Do we need to reset the chip??? */
796 i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
800 if (netif_msg_ifup(np))
801 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
802 dev->name, dev->irq);
805 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
806 /* The Tx list pointer is written as packets are queued. */
808 /* Initialize other registers. */
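/* MaxFrameSize is the MTU plus the 14-byte Ethernet header, with four
more bytes of room for a VLAN tag when 802.1Q support is configured. */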
810 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
811 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
813 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
816 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
818 /* Configure the PCI bus bursts and FIFO thresholds. */
820 if (dev->if_port == 0)
821 dev->if_port = np->default_port;
823 spin_lock_init(&np->mcastlock);
826 iowrite16(0, ioaddr + IntrEnable);
827 iowrite16(0, ioaddr + DownCounter);
828 /* Set the chip to poll every N*320nsec. */
829 iowrite8(100, ioaddr + RxDMAPollPeriod);
830 iowrite8(127, ioaddr + TxDMAPollPeriod);
831 /* Fix DFE-580TX packet drop issue */
832 if (np->pci_rev_id >= 0x14)
833 iowrite8(0x01, ioaddr + DebugCtrl1);
834 netif_start_queue(dev);
836 spin_lock_irqsave(&np->lock, flags);
838 spin_unlock_irqrestore(&np->lock, flags);
840 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
842 if (netif_msg_ifup(np))
843 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
844 "MAC Control %x, %4.4x %4.4x.\n",
845 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
846 ioread32(ioaddr + MACCtrl0),
847 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
849 /* Set the timer to check for link beat. */
850 init_timer(&np->timer);
851 np->timer.expires = jiffies + 3*HZ;
852 np->timer.data = (unsigned long)dev;
853 np->timer.function = &netdev_timer; /* timer handler */
854 add_timer(&np->timer);
856 /* Enable interrupts by setting the interrupt mask. */
857 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
862 static void check_duplex(struct net_device *dev)
864 struct netdev_private *np = netdev_priv(dev);
865 void __iomem *ioaddr = np->base;
866 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
867 int negotiated = mii_lpa & np->mii_if.advertising;
871 if (!np->an_enable || mii_lpa == 0xffff) {
872 if (np->mii_if.full_duplex)
873 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
878 /* Autonegotiation */
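/* Full duplex if both ends advertise 100baseTx-FD (0x0100), or if
10baseT-FD (0x0040) is the only ability shared among 10FD/100HD/100FD
(mask 0x01C0). */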
879 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
880 if (np->mii_if.full_duplex != duplex) {
881 np->mii_if.full_duplex = duplex;
882 if (netif_msg_link(np))
883 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
884 "negotiated capability %4.4x.\n", dev->name,
885 duplex ? "full" : "half", np->phys[0], negotiated);
886 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
890 static void netdev_timer(unsigned long data)
892 struct net_device *dev = (struct net_device *)data;
893 struct netdev_private *np = netdev_priv(dev);
894 void __iomem *ioaddr = np->base;
895 int next_tick = 10*HZ;
897 if (netif_msg_timer(np)) {
898 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
900 dev->name, ioread16(ioaddr + IntrEnable),
901 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
904 np->timer.expires = jiffies + next_tick;
905 add_timer(&np->timer);
908 static void tx_timeout(struct net_device *dev)
910 struct netdev_private *np = netdev_priv(dev);
911 void __iomem *ioaddr = np->base;
914 netif_stop_queue(dev);
915 tasklet_disable(&np->tx_tasklet);
916 iowrite16(0, ioaddr + IntrEnable);
917 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
919 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
920 ioread8(ioaddr + TxFrameId));
924 for (i=0; i<TX_RING_SIZE; i++) {
925 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
926 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
927 le32_to_cpu(np->tx_ring[i].next_desc),
928 le32_to_cpu(np->tx_ring[i].status),
929 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
930 le32_to_cpu(np->tx_ring[i].frag[0].addr),
931 le32_to_cpu(np->tx_ring[i].frag[0].length));
933 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
934 ioread32(np->base + TxListPtr),
935 netif_queue_stopped(dev));
936 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
937 np->cur_tx, np->cur_tx % TX_RING_SIZE,
938 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
939 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
940 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
942 spin_lock_irqsave(&np->lock, flag);
944 /* Stop and restart the chip's Tx processes. */
946 spin_unlock_irqrestore(&np->lock, flag);
950 dev->trans_start = jiffies;
951 np->stats.tx_errors++;
952 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
953 netif_wake_queue(dev);
955 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
956 tasklet_enable(&np->tx_tasklet);
960 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
961 static void init_ring(struct net_device *dev)
963 struct netdev_private *np = netdev_priv(dev);
966 np->cur_rx = np->cur_tx = 0;
967 np->dirty_rx = np->dirty_tx = 0;
970 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
972 /* Initialize all Rx descriptors. */
973 for (i = 0; i < RX_RING_SIZE; i++) {
974 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
975 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
976 np->rx_ring[i].status = 0;
977 np->rx_ring[i].frag[0].length = 0;
978 np->rx_skbuff[i] = NULL;
981 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
982 for (i = 0; i < RX_RING_SIZE; i++) {
983 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
984 np->rx_skbuff[i] = skb;
987 skb->dev = dev; /* Mark as being used by this device. */
988 skb_reserve(skb, 2); /* 16 byte align the IP header. */
989 np->rx_ring[i].frag[0].addr = cpu_to_le32(
990 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
991 PCI_DMA_FROMDEVICE));
992 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
994 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
996 for (i = 0; i < TX_RING_SIZE; i++) {
997 np->tx_skbuff[i] = NULL;
998 np->tx_ring[i].status = 0;
1003 static void tx_poll (unsigned long data)
1005 struct net_device *dev = (struct net_device *)data;
1006 struct netdev_private *np = netdev_priv(dev);
1007 unsigned head = np->cur_task % TX_RING_SIZE;
1008 struct netdev_desc *txdesc =
1009 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1011 /* Chain the next pointer */
1012 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1013 int entry = np->cur_task % TX_RING_SIZE;
1014 txdesc = &np->tx_ring[entry];
1016 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1017 entry*sizeof(struct netdev_desc));
1019 np->last_tx = txdesc;
1021 /* Indicate the latest descriptor of tx ring */
1022 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1024 if (ioread32 (np->base + TxListPtr) == 0)
1025 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1026 np->base + TxListPtr);
1031 start_tx (struct sk_buff *skb, struct net_device *dev)
1033 struct netdev_private *np = netdev_priv(dev);
1034 struct netdev_desc *txdesc;
1037 /* Calculate the next Tx descriptor entry. */
1038 entry = np->cur_tx % TX_RING_SIZE;
1039 np->tx_skbuff[entry] = skb;
1040 txdesc = &np->tx_ring[entry];
1042 txdesc->next_desc = 0;
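/* Bits 7:2 of the status word carry the ring entry number; intr_handler()
reads it back as sw_frame_id and matches it against the chip's TxFrameId
on PCI rev >= 0x14 (DFE-580TX) boards. */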
1043 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1044 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1047 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1049 /* Increment cur_tx before tasklet_schedule() */
1052 /* Schedule a tx_poll() task */
1053 tasklet_schedule(&np->tx_tasklet);
1055 /* On some architectures: explicitly flush cache lines here. */
1056 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1057 && !netif_queue_stopped(dev)) {
1060 netif_stop_queue (dev);
1062 dev->trans_start = jiffies;
1063 if (netif_msg_tx_queued(np)) {
1065 "%s: Transmit frame #%d queued in slot %d.\n",
1066 dev->name, np->cur_tx, entry);
1071 /* Reset the hardware Tx path and free all Tx buffers */
1073 reset_tx (struct net_device *dev)
1075 struct netdev_private *np = netdev_priv(dev);
1076 void __iomem *ioaddr = np->base;
1077 struct sk_buff *skb;
1079 int irq = in_interrupt();
1081 /* Reset the Tx logic; TxListPtr will be cleared */
1082 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1083 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1085 /* Free all Tx skbuffs */
1086 for (i = 0; i < TX_RING_SIZE; i++) {
1087 np->tx_ring[i].next_desc = 0;
1089 skb = np->tx_skbuff[i];
1091 pci_unmap_single(np->pci_dev,
1092 np->tx_ring[i].frag[0].addr, skb->len,
1095 dev_kfree_skb_irq (skb);
1097 dev_kfree_skb (skb);
1098 np->tx_skbuff[i] = NULL;
1099 np->stats.tx_dropped++;
1102 np->cur_tx = np->dirty_tx = 0;
1106 iowrite8(127, ioaddr + TxDMAPollPeriod);
1108 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1112 /* The interrupt handler cleans up after the Tx thread,
1113 and schedules the Rx poll tasklet. */
1114 static irqreturn_t intr_handler(int irq, void *dev_instance)
1116 struct net_device *dev = (struct net_device *)dev_instance;
1117 struct netdev_private *np = netdev_priv(dev);
1118 void __iomem *ioaddr = np->base;
1127 int intr_status = ioread16(ioaddr + IntrStatus);
1128 iowrite16(intr_status, ioaddr + IntrStatus);
1130 if (netif_msg_intr(np))
1131 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1132 dev->name, intr_status);
1134 if (!(intr_status & DEFAULT_INTR))
1139 if (intr_status & (IntrRxDMADone)) {
1140 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1141 ioaddr + IntrEnable);
1143 np->budget = RX_BUDGET;
1144 tasklet_schedule(&np->rx_tasklet);
1146 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1147 tx_status = ioread16 (ioaddr + TxStatus);
1148 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1149 if (netif_msg_tx_done(np))
1151 ("%s: Transmit status is %2.2x.\n",
1152 dev->name, tx_status);
1153 if (tx_status & 0x1e) {
1154 if (netif_msg_tx_err(np))
1155 printk("%s: Transmit error status %4.4x.\n",
1156 dev->name, tx_status);
1157 np->stats.tx_errors++;
1158 if (tx_status & 0x10)
1159 np->stats.tx_fifo_errors++;
1160 if (tx_status & 0x08)
1161 np->stats.collisions++;
1162 if (tx_status & 0x04)
1163 np->stats.tx_fifo_errors++;
1164 if (tx_status & 0x02)
1165 np->stats.tx_window_errors++;
1168 ** This reset has been verified on
1169 ** DFE-580TX boards ! phdm@macqel.be.
1171 if (tx_status & 0x10) { /* TxUnderrun */
1172 /* Restart Tx FIFO and transmitter */
1173 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1174 /* No need to reset the Tx pointer here */
1176 /* Restart the Tx. Need to make sure the Tx is enabled */
1179 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1180 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1185 /* Yup, this is a documentation bug. It cost me *hours*. */
1186 iowrite16 (0, ioaddr + TxStatus);
1188 iowrite32(5000, ioaddr + DownCounter);
1191 tx_status = ioread16 (ioaddr + TxStatus);
1193 hw_frame_id = (tx_status >> 8) & 0xff;
1195 hw_frame_id = ioread8(ioaddr + TxFrameId);
1198 if (np->pci_rev_id >= 0x14) {
1199 spin_lock(&np->lock);
1200 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1201 int entry = np->dirty_tx % TX_RING_SIZE;
1202 struct sk_buff *skb;
1204 sw_frame_id = (le32_to_cpu(
1205 np->tx_ring[entry].status) >> 2) & 0xff;
1206 if (sw_frame_id == hw_frame_id &&
1207 !(le32_to_cpu(np->tx_ring[entry].status)
1210 if (sw_frame_id == (hw_frame_id + 1) %
1213 skb = np->tx_skbuff[entry];
1214 /* Free the original skb. */
1215 pci_unmap_single(np->pci_dev,
1216 np->tx_ring[entry].frag[0].addr,
1217 skb->len, PCI_DMA_TODEVICE);
1218 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1219 np->tx_skbuff[entry] = NULL;
1220 np->tx_ring[entry].frag[0].addr = 0;
1221 np->tx_ring[entry].frag[0].length = 0;
1223 spin_unlock(&np->lock);
1225 spin_lock(&np->lock);
1226 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1227 int entry = np->dirty_tx % TX_RING_SIZE;
1228 struct sk_buff *skb;
1229 if (!(le32_to_cpu(np->tx_ring[entry].status)
1232 skb = np->tx_skbuff[entry];
1233 /* Free the original skb. */
1234 pci_unmap_single(np->pci_dev,
1235 np->tx_ring[entry].frag[0].addr,
1236 skb->len, PCI_DMA_TODEVICE);
1237 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1238 np->tx_skbuff[entry] = NULL;
1239 np->tx_ring[entry].frag[0].addr = 0;
1240 np->tx_ring[entry].frag[0].length = 0;
1242 spin_unlock(&np->lock);
1245 if (netif_queue_stopped(dev) &&
1246 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1247 /* The ring is no longer full, clear busy flag. */
1248 netif_wake_queue (dev);
1250 /* Abnormal error summary/uncommon events handlers. */
1251 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1252 netdev_error(dev, intr_status);
1254 if (netif_msg_intr(np))
1255 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1256 dev->name, ioread16(ioaddr + IntrStatus));
1257 return IRQ_RETVAL(handled);
1260 static void rx_poll(unsigned long data)
1262 struct net_device *dev = (struct net_device *)data;
1263 struct netdev_private *np = netdev_priv(dev);
1264 int entry = np->cur_rx % RX_RING_SIZE;
1265 int boguscnt = np->budget;
1266 void __iomem *ioaddr = np->base;
1269 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1271 struct netdev_desc *desc = &(np->rx_ring[entry]);
1272 u32 frame_status = le32_to_cpu(desc->status);
1275 if (--boguscnt < 0) {
1278 if (!(frame_status & DescOwn))
1280 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1281 if (netif_msg_rx_status(np))
1282 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1284 if (frame_status & 0x001f4000) {
1285 /* There was an error. */
1286 if (netif_msg_rx_err(np))
1287 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1289 np->stats.rx_errors++;
1290 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1291 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1292 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1293 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1294 if (frame_status & 0x00100000) {
1295 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1297 dev->name, frame_status);
1300 struct sk_buff *skb;
1301 #ifndef final_version
1302 if (netif_msg_rx_status(np))
1303 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1304 ", bogus_cnt %d.\n",
1307 /* Check if the packet is long enough to accept without copying
1308 to a minimally-sized skbuff. */
1309 if (pkt_len < rx_copybreak
1310 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1312 skb_reserve(skb, 2); /* 16 byte align the IP header */
1313 pci_dma_sync_single_for_cpu(np->pci_dev,
1316 PCI_DMA_FROMDEVICE);
1318 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1319 pci_dma_sync_single_for_device(np->pci_dev,
1322 PCI_DMA_FROMDEVICE);
1323 skb_put(skb, pkt_len);
1325 pci_unmap_single(np->pci_dev,
1328 PCI_DMA_FROMDEVICE);
1329 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1330 np->rx_skbuff[entry] = NULL;
1332 skb->protocol = eth_type_trans(skb, dev);
1333 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1335 dev->last_rx = jiffies;
1337 entry = (entry + 1) % RX_RING_SIZE;
1342 np->budget -= received;
1343 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1351 np->budget -= received;
1352 if (np->budget <= 0)
1353 np->budget = RX_BUDGET;
1354 tasklet_schedule(&np->rx_tasklet);
1358 static void refill_rx (struct net_device *dev)
1360 struct netdev_private *np = netdev_priv(dev);
1364 /* Refill the Rx ring buffers. */
1365 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1366 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1367 struct sk_buff *skb;
1368 entry = np->dirty_rx % RX_RING_SIZE;
1369 if (np->rx_skbuff[entry] == NULL) {
1370 skb = dev_alloc_skb(np->rx_buf_sz);
1371 np->rx_skbuff[entry] = skb;
1373 break; /* Better luck next round. */
1374 skb->dev = dev; /* Mark as being used by this device. */
1375 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1376 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1377 pci_map_single(np->pci_dev, skb->data,
1378 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1380 /* Perhaps we need not reset this field. */
1381 np->rx_ring[entry].frag[0].length =
1382 cpu_to_le32(np->rx_buf_sz | LastFrag);
1383 np->rx_ring[entry].status = 0;
1388 static void netdev_error(struct net_device *dev, int intr_status)
1390 struct netdev_private *np = netdev_priv(dev);
1391 void __iomem *ioaddr = np->base;
1392 u16 mii_ctl, mii_advertise, mii_lpa;
1395 if (intr_status & LinkChange) {
1396 if (np->an_enable) {
1397 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1398 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1399 mii_advertise &= mii_lpa;
1400 printk (KERN_INFO "%s: Link changed: ", dev->name);
1401 if (mii_advertise & ADVERTISE_100FULL) {
1403 printk ("100Mbps, full duplex\n");
1404 } else if (mii_advertise & ADVERTISE_100HALF) {
1406 printk ("100Mbps, half duplex\n");
1407 } else if (mii_advertise & ADVERTISE_10FULL) {
1409 printk ("10Mbps, full duplex\n");
1410 } else if (mii_advertise & ADVERTISE_10HALF) {
1412 printk ("10Mbps, half duplex\n");
1417 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1418 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1420 printk (KERN_INFO "%s: Link changed: %dMbps, ",
1422 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1426 if (np->flowctrl && np->mii_if.full_duplex) {
1427 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1428 ioaddr + MulticastFilter1+2);
1429 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1433 if (intr_status & StatsMax) {
1436 if (intr_status & IntrPCIErr) {
1437 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1438 dev->name, intr_status);
1439 /* We must do a global reset of DMA to continue. */
1443 static struct net_device_stats *get_stats(struct net_device *dev)
1445 struct netdev_private *np = netdev_priv(dev);
1446 void __iomem *ioaddr = np->base;
1449 /* We should lock this segment of code for SMP eventually, although
1450 the vulnerability window is very small and statistics are
1451 non-critical. */
1452 /* The chip only needs to report frames it silently dropped. */
1453 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1454 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1455 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1456 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1457 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1458 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1459 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
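/* The registers below are read only to drain them; the values are
discarded, presumably because the counters are clear-on-read and would
otherwise wrap (editorial assumption). */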
1460 ioread8(ioaddr + StatsTxDefer);
1461 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1462 ioread8(ioaddr + i);
1463 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1464 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1465 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1466 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1471 static void set_rx_mode(struct net_device *dev)
1473 struct netdev_private *np = netdev_priv(dev);
1474 void __iomem *ioaddr = np->base;
1475 u16 mc_filter[4]; /* Multicast hash filter */
1479 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1480 memset(mc_filter, 0xff, sizeof(mc_filter));
1481 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1482 } else if ((dev->mc_count > multicast_filter_limit)
1483 || (dev->flags & IFF_ALLMULTI)) {
1484 /* Too many to match, or accept all multicasts. */
1485 memset(mc_filter, 0xff, sizeof(mc_filter));
1486 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1487 } else if (dev->mc_count) {
1488 struct dev_mc_list *mclist;
1492 memset (mc_filter, 0, sizeof (mc_filter));
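/* Hash each address with the little-endian Ethernet CRC; the bit-reversed
top six CRC bits select one of the 64 filter bits spread across the four
16-bit MulticastFilter registers. */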
1493 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1494 i++, mclist = mclist->next) {
1495 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1496 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1497 if (crc & 0x80000000) index |= 1 << bit;
1498 mc_filter[index/16] |= (1 << (index % 16));
1500 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1502 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1505 if (np->mii_if.full_duplex && np->flowctrl)
1506 mc_filter[3] |= 0x0200;
1508 for (i = 0; i < 4; i++)
1509 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1510 iowrite8(rx_mode, ioaddr + RxMode);
1513 static int __set_mac_addr(struct net_device *dev)
1515 struct netdev_private *np = netdev_priv(dev);
1518 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1519 iowrite16(addr16, np->base + StationAddr);
1520 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1521 iowrite16(addr16, np->base + StationAddr+2);
1522 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1523 iowrite16(addr16, np->base + StationAddr+4);
1527 static int check_if_running(struct net_device *dev)
1529 if (!netif_running(dev))
1534 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1536 struct netdev_private *np = netdev_priv(dev);
1537 strcpy(info->driver, DRV_NAME);
1538 strcpy(info->version, DRV_VERSION);
1539 strcpy(info->bus_info, pci_name(np->pci_dev));
1542 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1544 struct netdev_private *np = netdev_priv(dev);
1545 spin_lock_irq(&np->lock);
1546 mii_ethtool_gset(&np->mii_if, ecmd);
1547 spin_unlock_irq(&np->lock);
1551 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1553 struct netdev_private *np = netdev_priv(dev);
1555 spin_lock_irq(&np->lock);
1556 res = mii_ethtool_sset(&np->mii_if, ecmd);
1557 spin_unlock_irq(&np->lock);
1561 static int nway_reset(struct net_device *dev)
1563 struct netdev_private *np = netdev_priv(dev);
1564 return mii_nway_restart(&np->mii_if);
1567 static u32 get_link(struct net_device *dev)
1569 struct netdev_private *np = netdev_priv(dev);
1570 return mii_link_ok(&np->mii_if);
1573 static u32 get_msglevel(struct net_device *dev)
1575 struct netdev_private *np = netdev_priv(dev);
1576 return np->msg_enable;
1579 static void set_msglevel(struct net_device *dev, u32 val)
1581 struct netdev_private *np = netdev_priv(dev);
1582 np->msg_enable = val;
1585 static const struct ethtool_ops ethtool_ops = {
1586 .begin = check_if_running,
1587 .get_drvinfo = get_drvinfo,
1588 .get_settings = get_settings,
1589 .set_settings = set_settings,
1590 .nway_reset = nway_reset,
1591 .get_link = get_link,
1592 .get_msglevel = get_msglevel,
1593 .set_msglevel = set_msglevel,
1594 .get_perm_addr = ethtool_op_get_perm_addr,
1597 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1599 struct netdev_private *np = netdev_priv(dev);
1600 void __iomem *ioaddr = np->base;
1604 if (!netif_running(dev))
1607 spin_lock_irq(&np->lock);
1608 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1609 spin_unlock_irq(&np->lock);
1611 case SIOCDEVPRIVATE:
1612 for (i=0; i<TX_RING_SIZE; i++) {
1613 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
1614 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1615 le32_to_cpu(np->tx_ring[i].next_desc),
1616 le32_to_cpu(np->tx_ring[i].status),
1617 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1619 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1620 le32_to_cpu(np->tx_ring[i].frag[0].length));
1622 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1623 ioread32(np->base + TxListPtr),
1624 netif_queue_stopped(dev));
1625 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1626 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1627 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1628 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1629 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1630 printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
1638 static int netdev_close(struct net_device *dev)
1640 struct netdev_private *np = netdev_priv(dev);
1641 void __iomem *ioaddr = np->base;
1642 struct sk_buff *skb;
1645 /* Wait for and kill the tasklets */
1646 tasklet_kill(&np->rx_tasklet);
1647 tasklet_kill(&np->tx_tasklet);
1653 netif_stop_queue(dev);
1655 if (netif_msg_ifdown(np)) {
1656 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1657 "Rx %4.4x Int %2.2x.\n",
1658 dev->name, ioread8(ioaddr + TxStatus),
1659 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1660 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1661 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1664 /* Disable interrupts by clearing the interrupt mask. */
1665 iowrite16(0x0000, ioaddr + IntrEnable);
1667 /* Disable Rx and Tx DMA so resources can be released safely */
1668 iowrite32(0x500, ioaddr + DMACtrl);
1670 /* Stop the chip's Tx and Rx processes. */
1671 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1673 for (i = 2000; i > 0; i--) {
1674 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1679 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1680 ioaddr + ASICCtrl + 2);
1682 for (i = 2000; i > 0; i--) {
1683 if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1689 if (netif_msg_hw(np)) {
1690 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1691 (int)(np->tx_ring_dma));
1692 for (i = 0; i < TX_RING_SIZE; i++)
1693 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1694 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1695 np->tx_ring[i].frag[0].length);
1696 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1697 (int)(np->rx_ring_dma));
1698 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1699 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1700 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1701 np->rx_ring[i].frag[0].length);
1704 #endif /* __i386__ debugging only */
1706 free_irq(dev->irq, dev);
1708 del_timer_sync(&np->timer);
1710 /* Free all the skbuffs in the Rx queue. */
1711 for (i = 0; i < RX_RING_SIZE; i++) {
1712 np->rx_ring[i].status = 0;
1713 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1714 skb = np->rx_skbuff[i];
1716 pci_unmap_single(np->pci_dev,
1717 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1718 PCI_DMA_FROMDEVICE);
1720 np->rx_skbuff[i] = NULL;
1723 for (i = 0; i < TX_RING_SIZE; i++) {
1724 np->tx_ring[i].next_desc = 0;
1725 skb = np->tx_skbuff[i];
1727 pci_unmap_single(np->pci_dev,
1728 np->tx_ring[i].frag[0].addr, skb->len,
1731 np->tx_skbuff[i] = NULL;
1738 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1740 struct net_device *dev = pci_get_drvdata(pdev);
1743 struct netdev_private *np = netdev_priv(dev);
1745 unregister_netdev(dev);
1746 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1748 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1750 pci_iounmap(pdev, np->base);
1751 pci_release_regions(pdev);
1753 pci_set_drvdata(pdev, NULL);
1757 static struct pci_driver sundance_driver = {
1759 .id_table = sundance_pci_tbl,
1760 .probe = sundance_probe1,
1761 .remove = __devexit_p(sundance_remove1),
1764 static int __init sundance_init(void)
1766 /* when a module, this is printed whether or not devices are found in probe */
1770 return pci_register_driver(&sundance_driver);
1773 static void __exit sundance_exit(void)
1775 pci_unregister_driver(&sundance_driver);
1778 module_init(sundance_init);
1779 module_exit(sundance_exit);