1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
3 Written/copyright 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
23 ---------------------------------------------------------------------
25 Linux kernel-specific changes:
28 * Merge becker version 1.09 (4/08/2000)
31 * Major bugfix to 1.09 driver (Francis Romieu)
34 * Merge becker test version 1.09 (5/29/2000)
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
48 * ethtool driver info support (jgarzik)
51 * ethtool media get/set support (jgarzik)
54 * revert MII transceiver init change (jgarzik)
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
61 * fix power-up sequence
64 * revert version 1.1.12, power-up sequence "fix"
66 LK1.1.14 (Krzysztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
71 * fix power up/down for ethtool that broke in 1.11
75 #define DRV_NAME "epic100"
76 #define DRV_VERSION "1.11+LK1.1.14+AC1.1.14"
77 #define DRV_RELDATE "June 2, 2004"
79 /* The user-configurable values.
80 These may be modified when a driver module is loaded.*/
82 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
84 /* Used to pass the full-duplex flag, etc. */
85 #define MAX_UNITS 8 /* More are supported, limit only on options */
86 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
87 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
89 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
90 Setting to > 1518 effectively disables this feature. */
91 static int rx_copybreak;
93 /* Operational parameters that are set at compile time. */
95 /* Keep the ring sizes a power of two for operational efficiency.
96 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
97 Making the Tx ring too large decreases the effectiveness of channel
98 bonding and packet priority.
99 There are no ill effects from too-large receive rings. */
100 #define TX_RING_SIZE 256
101 #define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
102 #define RX_RING_SIZE 256
103 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
104 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
106 /* Operational parameters that usually are not changed. */
107 /* Time in jiffies before concluding the transmitter is hung. */
108 #define TX_TIMEOUT (2*HZ)
110 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
112 /* Bytes transferred to chip before transmission starts. */
113 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
114 #define TX_FIFO_THRESH 256
115 #define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
117 #include <linux/config.h>
118 #include <linux/module.h>
119 #include <linux/kernel.h>
120 #include <linux/string.h>
121 #include <linux/timer.h>
122 #include <linux/errno.h>
123 #include <linux/ioport.h>
124 #include <linux/slab.h>
125 #include <linux/interrupt.h>
126 #include <linux/pci.h>
127 #include <linux/delay.h>
128 #include <linux/netdevice.h>
129 #include <linux/etherdevice.h>
130 #include <linux/skbuff.h>
131 #include <linux/init.h>
132 #include <linux/spinlock.h>
133 #include <linux/ethtool.h>
134 #include <linux/mii.h>
135 #include <linux/crc32.h>
136 #include <linux/bitops.h>
138 #include <asm/uaccess.h>
140 /* These identify the driver base version and may not be removed. */
141 static char version[] __devinitdata =
142 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
143 static char version2[] __devinitdata =
144 " http://www.scyld.com/network/epic100.html\n";
145 static char version3[] __devinitdata =
146 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
148 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
149 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
150 MODULE_LICENSE("GPL");
152 module_param(debug, int, 0);
153 module_param(rx_copybreak, int, 0);
154 module_param_array(options, int, NULL, 0);
155 module_param_array(full_duplex, int, NULL, 0);
156 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
157 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
158 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
159 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
164 I. Board Compatibility
166 This device driver is designed for the SMC "EPIC/100", the SMC
167 single-chip Ethernet controllers for PCI. This chip is used on
168 the SMC EtherPower II boards.
170 II. Board-specific settings
172 PCI bus devices are configured by the system at boot time, so no jumpers
173 need to be set on the board. The system BIOS will assign the
174 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
175 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
178 III. Driver operation
184 http://www.smsc.com/main/datasheets/83c171.pdf
185 http://www.smsc.com/main/datasheets/83c175.pdf
186 http://scyld.com/expert/NWay.html
187 http://www.national.com/pf/DP/DP83840A.html
194 enum pci_id_flags_bits {
195 /* Set PCI command register bits before calling probe1(). */
196 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
197 /* Read and map the single following PCI BAR. */
198 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
199 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
202 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
204 #define EPIC_TOTAL_SIZE 0x100
207 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
209 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
219 struct epic_chip_info {
221 enum pci_id_flags_bits pci_flags;
222 int io_size; /* Needed for I/O region check or ioremap(). */
223 int drv_flags; /* Driver use, intended as capability flags. */
227 /* indexed by chip_t */
228 static const struct epic_chip_info pci_id_tbl[] = {
229 { "SMSC EPIC/100 83c170",
230 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
231 { "SMSC EPIC/100 83c170",
232 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR },
233 { "SMSC EPIC/C 83c175",
234 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
238 static struct pci_device_id epic_pci_tbl[] = {
239 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
240 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
241 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
242 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
245 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
263 /* Offsets to registers, using the (ugh) SMC names. */
264 enum epic_registers {
265 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
267 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
268 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
269 LAN0=64, /* MAC address. */
270 MC0=80, /* Multicast filter table. */
271 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
272 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
275 /* Interrupt register bits, using my own meaningful names. */
277 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
278 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
279 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
280 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
281 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
284 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
285 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
288 #define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
290 #define EpicNapiEvent (TxEmpty | TxDone | \
291 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
292 #define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
294 static const u16 media2miictl[16] = {
295 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
296 0, 0, 0, 0, 0, 0, 0, 0 };
298 /* The EPIC100 Rx and Tx buffer descriptors. */
300 struct epic_tx_desc {
307 struct epic_rx_desc {
314 enum desc_status_bits {
318 #define PRIV_ALIGN 15 /* Required alignment mask */
319 struct epic_private {
320 struct epic_rx_desc *rx_ring;
321 struct epic_tx_desc *tx_ring;
322 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
323 struct sk_buff* tx_skbuff[TX_RING_SIZE];
324 /* The addresses of receive-in-place skbuffs. */
325 struct sk_buff* rx_skbuff[RX_RING_SIZE];
327 dma_addr_t tx_ring_dma;
328 dma_addr_t rx_ring_dma;
331 spinlock_t lock; /* Group with Tx control cache line. */
332 spinlock_t napi_lock;
333 unsigned int reschedule_in_poll;
334 unsigned int cur_tx, dirty_tx;
336 unsigned int cur_rx, dirty_rx;
338 unsigned int rx_buf_sz; /* Based on MTU+slack. */
340 struct pci_dev *pci_dev; /* PCI bus location. */
341 int chip_id, chip_flags;
343 struct net_device_stats stats;
344 struct timer_list timer; /* Media selection timer. */
346 unsigned char mc_filter[8];
347 signed char phys[4]; /* MII device addresses. */
348 u16 advertising; /* NWay media advertisement */
350 struct mii_if_info mii;
351 unsigned int tx_full:1; /* The Tx queue is full. */
352 unsigned int default_port:4; /* Last dev->if_port value. */
355 static int epic_open(struct net_device *dev);
356 static int read_eeprom(long ioaddr, int location);
357 static int mdio_read(struct net_device *dev, int phy_id, int location);
358 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
359 static void epic_restart(struct net_device *dev);
360 static void epic_timer(unsigned long data);
361 static void epic_tx_timeout(struct net_device *dev);
362 static void epic_init_ring(struct net_device *dev);
363 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
364 static int epic_rx(struct net_device *dev, int budget);
365 static int epic_poll(struct net_device *dev, int *budget);
366 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
367 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
368 static struct ethtool_ops netdev_ethtool_ops;
369 static int epic_close(struct net_device *dev);
370 static struct net_device_stats *epic_get_stats(struct net_device *dev);
371 static void set_rx_mode(struct net_device *dev);
/* Probe one EPIC/100 PCI device: enable it, map its registers, allocate the
 * net_device and DMA descriptor rings, read the station address, scan for MII
 * transceivers, and register the interface.
 * NOTE(review): this listing has lost lines (braces, declarations, some
 * statements) — the embedded original line numbers are non-contiguous.
 * Compare against a pristine copy of epic100.c before relying on it. */
375 static int __devinit epic_init_one (struct pci_dev *pdev,
376 const struct pci_device_id *ent)
378 static int card_idx = -1;
380 int chip_idx = (int) ent->driver_data;
382 struct net_device *dev;
383 struct epic_private *ep;
384 int i, ret, option = 0, duplex = 0;
388 /* when built into the kernel, we only print version if device is found */
390 static int printed_version;
391 if (!printed_version++)
392 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
393 version, version2, version3);
/* --- PCI bring-up: enable, sanity-check BAR size, claim regions --- */
398 ret = pci_enable_device(pdev);
403 if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
404 printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
406 goto err_out_disable;
409 pci_set_master(pdev);
411 ret = pci_request_regions(pdev, DRV_NAME);
413 goto err_out_disable;
417 dev = alloc_etherdev(sizeof (*ep));
419 printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
420 goto err_out_free_res;
422 SET_MODULE_OWNER(dev);
423 SET_NETDEV_DEV(dev, &pdev->dev);
/* BAR 0 is I/O space; BAR 1 is memory space and needs an ioremap(). */
426 ioaddr = pci_resource_start (pdev, 0);
428 ioaddr = pci_resource_start (pdev, 1);
429 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
431 printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
432 goto err_out_free_netdev;
436 pci_set_drvdata(pdev, dev);
439 ep->mii.mdio_read = mdio_read;
440 ep->mii.mdio_write = mdio_write;
441 ep->mii.phy_id_mask = 0x1f;
442 ep->mii.reg_num_mask = 0x1f;
/* --- Allocate coherent DMA memory for the Tx and Rx descriptor rings --- */
444 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
446 goto err_out_iounmap;
447 ep->tx_ring = (struct epic_tx_desc *)ring_space;
448 ep->tx_ring_dma = ring_dma;
450 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
452 goto err_out_unmap_tx;
453 ep->rx_ring = (struct epic_rx_desc *)ring_space;
454 ep->rx_ring_dma = ring_dma;
/* Media option: dev->mem_start (set by the boot/module loader) wins,
   otherwise fall back to the per-card module parameters. */
456 if (dev->mem_start) {
457 option = dev->mem_start;
458 duplex = (dev->mem_start & 16) ? 1 : 0;
459 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
460 if (options[card_idx] >= 0)
461 option = options[card_idx];
462 if (full_duplex[card_idx] >= 0)
463 duplex = full_duplex[card_idx];
466 dev->base_addr = ioaddr;
469 spin_lock_init(&ep->lock);
470 spin_lock_init(&ep->napi_lock);
471 ep->reschedule_in_poll = 0;
473 /* Bring the chip out of low-power mode. */
474 outl(0x4200, ioaddr + GENCTL);
475 /* Magic?! If we don't set this bit the MII interface won't work. */
476 /* This magic is documented in SMSC app note 7.15 */
477 for (i = 16; i > 0; i--)
478 outl(0x0008, ioaddr + TEST1);
480 /* Turn on the MII transceiver. */
481 outl(0x12, ioaddr + MIICfg);
483 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
484 outl(0x0200, ioaddr + GENCTL);
486 /* Note: the '175 does not have a serial EEPROM. */
487 for (i = 0; i < 3; i++)
488 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
491 printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
493 for (i = 0; i < 64; i++)
494 printk(" %4.4x%s", read_eeprom(ioaddr, i),
495 i % 16 == 15 ? "\n" : "");
499 ep->chip_id = chip_idx;
500 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
502 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
503 | CntFull | TxUnderrun | EpicNapiEvent;
505 /* Find the connected MII xcvrs.
506 Doing this in open() would allow detecting external xcvrs later, but
507 takes much time and no cards have external MII. */
509 int phy, phy_idx = 0;
510 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
511 int mii_status = mdio_read(dev, phy, MII_BMSR);
512 if (mii_status != 0xffff && mii_status != 0x0000) {
513 ep->phys[phy_idx++] = phy;
514 printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
515 "%4.4x status %4.4x.\n",
516 pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status);
519 ep->mii_phy_cnt = phy_idx;
522 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
523 printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
525 pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5));
526 } else if ( ! (ep->chip_flags & NO_MII)) {
527 printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
529 /* Use the known PHY address of the EPII. */
532 ep->mii.phy_id = ep->phys[0];
535 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
536 if (ep->chip_flags & MII_PWRDWN)
537 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
538 outl(0x0008, ioaddr + GENCTL);
540 /* The lower four bits are the media type. */
542 ep->mii.force_media = ep->mii.full_duplex = 1;
543 printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
546 dev->if_port = ep->default_port = option;
548 /* The Epic-specific entries in the device structure. */
549 dev->open = &epic_open;
550 dev->hard_start_xmit = &epic_start_xmit;
551 dev->stop = &epic_close;
552 dev->get_stats = &epic_get_stats;
553 dev->set_multicast_list = &set_rx_mode;
554 dev->do_ioctl = &netdev_ioctl;
555 dev->ethtool_ops = &netdev_ethtool_ops;
556 dev->watchdog_timeo = TX_TIMEOUT;
557 dev->tx_timeout = &epic_tx_timeout;
558 dev->poll = epic_poll;
561 ret = register_netdev(dev);
563 goto err_out_unmap_rx;
565 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
566 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
567 for (i = 0; i < 5; i++)
568 printk("%2.2x:", dev->dev_addr[i]);
569 printk("%2.2x.\n", dev->dev_addr[i]);
/* --- error unwind: free rings, unmap, release regions, disable device --- */
575 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
577 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
585 pci_release_regions(pdev);
587 pci_disable_device(pdev);
591 /* Serial EEPROM section. */
593 /* EEPROM_Ctrl bits. */
594 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
595 #define EE_CS 0x02 /* EEPROM chip select. */
596 #define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
597 #define EE_WRITE_0 0x01
598 #define EE_WRITE_1 0x09
599 #define EE_DATA_READ 0x10 /* EEPROM chip data out. */
600 #define EE_ENB (0x0001 | EE_CS)
602 /* Delay between EEPROM clock transitions.
603 This serves to flush the operation to the PCI bus.
606 #define eeprom_delay() inl(ee_addr)
608 /* The EEPROM commands include the always-set leading bit. */
609 #define EE_WRITE_CMD (5 << 6)
610 #define EE_READ64_CMD (6 << 6)
611 #define EE_READ256_CMD (6 << 8)
612 #define EE_ERASE_CMD (7 << 6)
614 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
616 long ioaddr = dev->base_addr;
618 outl(0x00000000, ioaddr + INTMASK);
621 static inline void __epic_pci_commit(long ioaddr)
624 inl(ioaddr + INTMASK);
628 static inline void epic_napi_irq_off(struct net_device *dev,
629 struct epic_private *ep)
631 long ioaddr = dev->base_addr;
633 outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
634 __epic_pci_commit(ioaddr);
637 static inline void epic_napi_irq_on(struct net_device *dev,
638 struct epic_private *ep)
640 long ioaddr = dev->base_addr;
642 /* No need to commit possible posted write */
643 outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
646 static int __devinit read_eeprom(long ioaddr, int location)
650 long ee_addr = ioaddr + EECTL;
651 int read_cmd = location |
652 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
654 outl(EE_ENB & ~EE_CS, ee_addr);
655 outl(EE_ENB, ee_addr);
657 /* Shift the read command bits out. */
658 for (i = 12; i >= 0; i--) {
659 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
660 outl(EE_ENB | dataval, ee_addr);
662 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
665 outl(EE_ENB, ee_addr);
667 for (i = 16; i > 0; i--) {
668 outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
670 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
671 outl(EE_ENB, ee_addr);
675 /* Terminate the EEPROM access. */
676 outl(EE_ENB & ~EE_CS, ee_addr);
681 #define MII_WRITEOP 2
682 static int mdio_read(struct net_device *dev, int phy_id, int location)
684 long ioaddr = dev->base_addr;
685 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
688 outl(read_cmd, ioaddr + MIICtrl);
689 /* Typical operation takes 25 loops. */
690 for (i = 400; i > 0; i--) {
692 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
693 /* Work around read failure bug. */
694 if (phy_id == 1 && location < 6
695 && inw(ioaddr + MIIData) == 0xffff) {
696 outl(read_cmd, ioaddr + MIICtrl);
699 return inw(ioaddr + MIIData);
705 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
707 long ioaddr = dev->base_addr;
710 outw(value, ioaddr + MIIData);
711 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
712 for (i = 10000; i > 0; i--) {
714 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
721 static int epic_open(struct net_device *dev)
723 struct epic_private *ep = dev->priv;
724 long ioaddr = dev->base_addr;
728 /* Soft reset the chip. */
729 outl(0x4001, ioaddr + GENCTL);
731 if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
736 outl(0x4000, ioaddr + GENCTL);
737 /* This magic is documented in SMSC app note 7.15 */
738 for (i = 16; i > 0; i--)
739 outl(0x0008, ioaddr + TEST1);
741 /* Pull the chip out of low-power mode, enable interrupts, and set for
742 PCI read multiple. The MIIcfg setting and strange write order are
743 required by the details of which bits are reset and the transceiver
744 wiring on the Ositech CardBus card.
747 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
749 if (ep->chip_flags & MII_PWRDWN)
750 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
752 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
753 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
754 inl(ioaddr + GENCTL);
755 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
757 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
758 inl(ioaddr + GENCTL);
759 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
762 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
764 for (i = 0; i < 3; i++)
765 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
767 ep->tx_threshold = TX_FIFO_THRESH;
768 outl(ep->tx_threshold, ioaddr + TxThresh);
770 if (media2miictl[dev->if_port & 15]) {
772 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
773 if (dev->if_port == 1) {
775 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
777 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
780 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
781 if (mii_lpa != 0xffff) {
782 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
783 ep->mii.full_duplex = 1;
784 else if (! (mii_lpa & LPA_LPACK))
785 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
787 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
788 " register read of %4.4x.\n", dev->name,
789 ep->mii.full_duplex ? "full" : "half",
790 ep->phys[0], mii_lpa);
794 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
795 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
796 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
798 /* Start the chip's Rx process. */
800 outl(StartRx | RxQueued, ioaddr + COMMAND);
802 netif_start_queue(dev);
804 /* Enable interrupts by setting the interrupt mask. */
805 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
806 | CntFull | TxUnderrun
807 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
810 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
812 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
813 ep->mii.full_duplex ? "full" : "half");
815 /* Set the timer to switch to check for link beat and perhaps switch
816 to an alternate media type. */
817 init_timer(&ep->timer);
818 ep->timer.expires = jiffies + 3*HZ;
819 ep->timer.data = (unsigned long)dev;
820 ep->timer.function = &epic_timer; /* timer handler */
821 add_timer(&ep->timer);
826 /* Reset the chip to recover from a PCI transaction error.
827 This may occur at interrupt time. */
828 static void epic_pause(struct net_device *dev)
830 long ioaddr = dev->base_addr;
831 struct epic_private *ep = dev->priv;
833 netif_stop_queue (dev);
835 /* Disable interrupts by clearing the interrupt mask. */
836 outl(0x00000000, ioaddr + INTMASK);
837 /* Stop the chip's Tx and Rx DMA processes. */
838 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
840 /* Update the error counts. */
841 if (inw(ioaddr + COMMAND) != 0xffff) {
842 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
843 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
844 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
847 /* Remove the packets on the Rx queue. */
848 epic_rx(dev, RX_RING_SIZE);
851 static void epic_restart(struct net_device *dev)
853 long ioaddr = dev->base_addr;
854 struct epic_private *ep = dev->priv;
857 /* Soft reset the chip. */
858 outl(0x4001, ioaddr + GENCTL);
860 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
861 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
864 /* This magic is documented in SMSC app note 7.15 */
865 for (i = 16; i > 0; i--)
866 outl(0x0008, ioaddr + TEST1);
868 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
869 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
871 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
873 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
874 if (ep->chip_flags & MII_PWRDWN)
875 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
877 for (i = 0; i < 3; i++)
878 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
880 ep->tx_threshold = TX_FIFO_THRESH;
881 outl(ep->tx_threshold, ioaddr + TxThresh);
882 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
883 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
884 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
885 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
886 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
888 /* Start the chip's Rx process. */
890 outl(StartRx | RxQueued, ioaddr + COMMAND);
892 /* Enable interrupts by setting the interrupt mask. */
893 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
894 | CntFull | TxUnderrun
895 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
897 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
898 " interrupt %4.4x.\n",
899 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
900 (int)inl(ioaddr + INTSTAT));
904 static void check_media(struct net_device *dev)
906 struct epic_private *ep = dev->priv;
907 long ioaddr = dev->base_addr;
908 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
909 int negotiated = mii_lpa & ep->mii.advertising;
910 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
912 if (ep->mii.force_media)
914 if (mii_lpa == 0xffff) /* Bogus read */
916 if (ep->mii.full_duplex != duplex) {
917 ep->mii.full_duplex = duplex;
918 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
919 " partner capability of %4.4x.\n", dev->name,
920 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
921 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
925 static void epic_timer(unsigned long data)
927 struct net_device *dev = (struct net_device *)data;
928 struct epic_private *ep = dev->priv;
929 long ioaddr = dev->base_addr;
930 int next_tick = 5*HZ;
933 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
934 dev->name, (int)inl(ioaddr + TxSTAT));
935 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
936 "IntStatus %4.4x RxStatus %4.4x.\n",
937 dev->name, (int)inl(ioaddr + INTMASK),
938 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
943 ep->timer.expires = jiffies + next_tick;
944 add_timer(&ep->timer);
947 static void epic_tx_timeout(struct net_device *dev)
949 struct epic_private *ep = dev->priv;
950 long ioaddr = dev->base_addr;
953 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
954 "Tx status %4.4x.\n",
955 dev->name, (int)inw(ioaddr + TxSTAT));
957 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
958 dev->name, ep->dirty_tx, ep->cur_tx);
961 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
962 ep->stats.tx_fifo_errors++;
963 outl(RestartTx, ioaddr + COMMAND);
966 outl(TxQueued, dev->base_addr + COMMAND);
969 dev->trans_start = jiffies;
970 ep->stats.tx_errors++;
972 netif_wake_queue(dev);
975 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
976 static void epic_init_ring(struct net_device *dev)
978 struct epic_private *ep = dev->priv;
982 ep->dirty_tx = ep->cur_tx = 0;
983 ep->cur_rx = ep->dirty_rx = 0;
984 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
986 /* Initialize all Rx descriptors. */
987 for (i = 0; i < RX_RING_SIZE; i++) {
988 ep->rx_ring[i].rxstatus = 0;
989 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
990 ep->rx_ring[i].next = ep->rx_ring_dma +
991 (i+1)*sizeof(struct epic_rx_desc);
992 ep->rx_skbuff[i] = NULL;
994 /* Mark the last entry as wrapping the ring. */
995 ep->rx_ring[i-1].next = ep->rx_ring_dma;
997 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
998 for (i = 0; i < RX_RING_SIZE; i++) {
999 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
1000 ep->rx_skbuff[i] = skb;
1003 skb->dev = dev; /* Mark as being used by this device. */
1004 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1005 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
1006 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1007 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
1009 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1011 /* The Tx buffer descriptor is filled in as needed, but we
1012 do need to clear the ownership bit. */
1013 for (i = 0; i < TX_RING_SIZE; i++) {
1014 ep->tx_skbuff[i] = NULL;
1015 ep->tx_ring[i].txstatus = 0x0000;
1016 ep->tx_ring[i].next = ep->tx_ring_dma +
1017 (i+1)*sizeof(struct epic_tx_desc);
1019 ep->tx_ring[i-1].next = ep->tx_ring_dma;
1023 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
1025 struct epic_private *ep = dev->priv;
1026 int entry, free_count;
1028 unsigned long flags;
1030 if (skb_padto(skb, ETH_ZLEN))
1033 /* Caution: the write order is important here, set the field with the
1034 "ownership" bit last. */
1036 /* Calculate the next Tx descriptor entry. */
1037 spin_lock_irqsave(&ep->lock, flags);
1038 free_count = ep->cur_tx - ep->dirty_tx;
1039 entry = ep->cur_tx % TX_RING_SIZE;
1041 ep->tx_skbuff[entry] = skb;
1042 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
1043 skb->len, PCI_DMA_TODEVICE);
1044 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
1045 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
1046 } else if (free_count == TX_QUEUE_LEN/2) {
1047 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1048 } else if (free_count < TX_QUEUE_LEN - 1) {
1049 ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
1051 /* Leave room for an additional entry. */
1052 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1055 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
1056 ep->tx_ring[entry].txstatus =
1057 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
1058 | cpu_to_le32(DescOwn);
1062 netif_stop_queue(dev);
1064 spin_unlock_irqrestore(&ep->lock, flags);
1065 /* Trigger an immediate transmit demand. */
1066 outl(TxQueued, dev->base_addr + COMMAND);
1068 dev->trans_start = jiffies;
1070 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1071 "flag %2.2x Tx status %8.8x.\n",
1072 dev->name, (int)skb->len, entry, ctrl_word,
1073 (int)inl(dev->base_addr + TxSTAT));
1078 static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1081 struct net_device_stats *stats = &ep->stats;
1083 #ifndef final_version
1084 /* There was an major error, log it. */
1086 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1090 if (status & 0x1050)
1091 stats->tx_aborted_errors++;
1092 if (status & 0x0008)
1093 stats->tx_carrier_errors++;
1094 if (status & 0x0040)
1095 stats->tx_window_errors++;
1096 if (status & 0x0010)
1097 stats->tx_fifo_errors++;
/* epic_tx: reclaim completed Tx descriptors between dirty_tx and cur_tx,
   updating statistics, unmapping DMA buffers and freeing skbs.  Wakes the
   transmit queue once enough ring slots have been reclaimed. */
1100 static void epic_tx(struct net_device *dev, struct epic_private *ep)
1102 unsigned int dirty_tx, cur_tx;
1105 * Note: if this lock becomes a problem we can narrow the locked
1106 * region at the cost of occasionally grabbing the lock more times.
1108 cur_tx = ep->cur_tx;
1109 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
1110 struct sk_buff *skb;
1111 int entry = dirty_tx % TX_RING_SIZE;
1112 int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
/* DescOwn still set: the chip has not finished this descriptor yet. */
1114 if (txstatus & DescOwn)
1115 break; /* It still hasn't been Txed */
/* Bit 0 set means successful transmission; bits 8-11 carry the
   collision count for this frame. */
1117 if (likely(txstatus & 0x0001)) {
1118 ep->stats.collisions += (txstatus >> 8) & 15;
1119 ep->stats.tx_packets++;
1120 ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1122 epic_tx_error(dev, ep, txstatus);
1124 /* Free the original skb. */
1125 skb = ep->tx_skbuff[entry];
1126 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1127 skb->len, PCI_DMA_TODEVICE);
/* In-interrupt-safe free; we may be called from the IRQ path. */
1128 dev_kfree_skb_irq(skb);
1129 ep->tx_skbuff[entry] = NULL;
/* Debug-build sanity check: the dirty pointer must never lag the
   current pointer by more than a full ring. */
1132 #ifndef final_version
1133 if (cur_tx - dirty_tx > TX_RING_SIZE) {
1135 "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1136 dev->name, dirty_tx, cur_tx, ep->tx_full);
1137 dirty_tx += TX_RING_SIZE;
1140 ep->dirty_tx = dirty_tx;
/* Restart the queue once at least 4 slots below the queue limit. */
1141 if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1142 /* The ring is no longer full, allow new TX entries. */
1144 netif_wake_queue(dev);
1148 /* The interrupt handler does all of the Rx thread work and cleans up
1149 after the Tx thread. */
1150 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1152 struct net_device *dev = dev_instance;
1153 struct epic_private *ep = dev->priv;
1154 long ioaddr = dev->base_addr;
1155 unsigned int handled = 0;
1158 status = inl(ioaddr + INTSTAT);
1159 /* Acknowledge all of the current interrupt sources ASAP. */
1160 outl(status & EpicNormalEvent, ioaddr + INTSTAT);
1163 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1164 "intstat=%#8.8x.\n", dev->name, status,
1165 (int)inl(ioaddr + INTSTAT));
/* Not our interrupt: bail out (shared IRQ line). */
1168 if ((status & IntrSummary) == 0)
/* NAPI events: mask NAPI interrupts and schedule the poll routine.
   reschedule_in_poll bridges the race with epic_poll() re-enabling. */
1173 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
1174 spin_lock(&ep->napi_lock);
1175 if (netif_rx_schedule_prep(dev)) {
1176 epic_napi_irq_off(dev, ep);
1177 __netif_rx_schedule(dev);
1179 ep->reschedule_in_poll++;
1180 spin_unlock(&ep->napi_lock);
1182 status &= ~EpicNapiEvent;
1184 /* Check uncommon events all at once. */
1185 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
/* All-ones status: the (CardBus) card was removed. */
1186 if (status == EpicRemoved)
1189 /* Always update the error counts to avoid overhead later. */
1190 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1191 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1192 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1194 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1195 ep->stats.tx_fifo_errors++;
/* Raise the Tx start threshold by 128 bytes to avoid repeats. */
1196 outl(ep->tx_threshold += 128, ioaddr + TxThresh);
1197 /* Restart the transmit process. */
1198 outl(RestartTx, ioaddr + COMMAND);
1200 if (status & PCIBusErr170) {
1201 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
1206 /* Clear all error sources. */
1207 outl(status & 0x7f18, ioaddr + INTSTAT);
1212 printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
1216 return IRQ_RETVAL(handled);
/* epic_rx: NAPI receive worker.  Walks the Rx ring while descriptors are
   CPU-owned, passes good frames to the stack (copying small frames per
   rx_copybreak), then refills the ring.  Returns the number of packets
   processed, bounded by "budget". */
1219 static int epic_rx(struct net_device *dev, int budget)
1221 struct epic_private *ep = dev->priv;
1222 int entry = ep->cur_rx % RX_RING_SIZE;
/* Never process more entries than are outstanding in the ring. */
1223 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
1227 printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
1228 ep->rx_ring[entry].rxstatus);
1230 if (rx_work_limit > budget)
1231 rx_work_limit = budget;
1233 /* If we own the next entry, it's a new packet. Send it up. */
1234 while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
1235 int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
1238 printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
1239 if (--rx_work_limit < 0)
/* 0x2006: error bits -- 0x2000 frame spanned multiple buffers,
   0x0006 frame errors (counted by hardware). */
1241 if (status & 0x2006) {
1243 printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
1245 if (status & 0x2000) {
1246 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1247 "multiple buffers, status %4.4x!\n", dev->name, status);
1248 ep->stats.rx_length_errors++;
1249 } else if (status & 0x0006)
1250 /* Rx Frame errors are counted in hardware. */
1251 ep->stats.rx_errors++;
1253 /* Malloc up new buffer, compatible with net-2e. */
1254 /* Omit the four octet CRC from the length. */
1255 short pkt_len = (status >> 16) - 4;
1256 struct sk_buff *skb;
1258 if (pkt_len > PKT_BUF_SZ - 4) {
1259 printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
1261 dev->name, status, pkt_len);
1264 /* Check if the packet is long enough to accept without copying
1265 to a minimally-sized skbuff. */
1266 if (pkt_len < rx_copybreak
1267 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1269 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Copy path: sync the DMA buffer for CPU access, copy the
   frame out, then hand the buffer back to the device. */
1270 pci_dma_sync_single_for_cpu(ep->pci_dev,
1271 ep->rx_ring[entry].bufaddr,
1273 PCI_DMA_FROMDEVICE);
1274 eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
1275 skb_put(skb, pkt_len);
1276 pci_dma_sync_single_for_device(ep->pci_dev,
1277 ep->rx_ring[entry].bufaddr,
1279 PCI_DMA_FROMDEVICE);
/* Pass-up path: unmap and hand the ring skb itself to the
   stack; the refill loop below replaces it. */
1281 pci_unmap_single(ep->pci_dev,
1282 ep->rx_ring[entry].bufaddr,
1283 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1284 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
1285 ep->rx_skbuff[entry] = NULL;
1287 skb->protocol = eth_type_trans(skb, dev);
1288 netif_receive_skb(skb);
1289 dev->last_rx = jiffies;
1290 ep->stats.rx_packets++;
1291 ep->stats.rx_bytes += pkt_len;
1294 entry = (++ep->cur_rx) % RX_RING_SIZE;
1297 /* Refill the Rx ring buffers. */
1298 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
1299 entry = ep->dirty_rx % RX_RING_SIZE;
1300 if (ep->rx_skbuff[entry] == NULL) {
1301 struct sk_buff *skb;
1302 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
1305 skb->dev = dev; /* Mark as being used by this device. */
1306 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1307 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
1308 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Hand the descriptor back to the chip last. */
1311 ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
/* epic_rx_err: handle Rx error conditions reported in INTSTAT -- count
   missed frames and restart the receiver after an overflow/full ring. */
1316 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1318 long ioaddr = dev->base_addr;
1321 status = inl(ioaddr + INTSTAT);
/* All-ones status: the (CardBus) card was removed -- nothing to do. */
1323 if (status == EpicRemoved)
1325 if (status & RxOverflow) /* Missed a Rx frame. */
1326 ep->stats.rx_errors++;
/* Re-queue the receiver so it resumes after overflow or ring-full. */
1327 if (status & (RxOverflow | RxFull))
1328 outw(RxQueued, ioaddr + COMMAND);
/* epic_poll: NAPI poll callback.  Runs epic_rx() up to the quota, handles
   Rx errors, and -- if all work completed -- re-enables NAPI interrupts
   under napi_lock, honoring reschedule_in_poll set by the IRQ handler.
   Returns nonzero while more work remains. */
1331 static int epic_poll(struct net_device *dev, int *budget)
1333 struct epic_private *ep = dev->priv;
1334 int work_done = 0, orig_budget;
1335 long ioaddr = dev->base_addr;
1337 orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
1343 work_done += epic_rx(dev, *budget);
1345 epic_rx_err(dev, ep);
1347 *budget -= work_done;
1348 dev->quota -= work_done;
1350 if (netif_running(dev) && (work_done < orig_budget)) {
1351 unsigned long flags;
1354 /* A bit baroque but it avoids a (space hungry) spin_unlock */
1356 spin_lock_irqsave(&ep->napi_lock, flags);
/* The IRQ handler bumped reschedule_in_poll if a NAPI event
   arrived while we were polling; in that case stay scheduled. */
1358 more = ep->reschedule_in_poll;
1360 __netif_rx_complete(dev);
/* Ack the NAPI event sources, then unmask their interrupts. */
1361 outl(EpicNapiEvent, ioaddr + INTSTAT);
1362 epic_napi_irq_on(dev, ep);
1364 ep->reschedule_in_poll--;
1366 spin_unlock_irqrestore(&ep->napi_lock, flags);
1372 return (work_done >= orig_budget);
/* epic_close: bring the interface down.  Stops the queue and timer,
   disables interrupts, releases the IRQ, frees every Rx/Tx buffer with
   its DMA mapping, and leaves the chip in low-power mode. */
1375 static int epic_close(struct net_device *dev)
1377 long ioaddr = dev->base_addr;
1378 struct epic_private *ep = dev->priv;
1379 struct sk_buff *skb;
1382 netif_stop_queue(dev);
1385 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1386 dev->name, (int)inl(ioaddr + INTSTAT));
/* Stop the media timer before tearing anything else down. */
1388 del_timer_sync(&ep->timer);
1390 epic_disable_int(dev, ep);
1392 free_irq(dev->irq, dev);
1396 /* Free all the skbuffs in the Rx queue. */
1397 for (i = 0; i < RX_RING_SIZE; i++) {
1398 skb = ep->rx_skbuff[i];
1399 ep->rx_skbuff[i] = NULL;
1400 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1401 ep->rx_ring[i].buflength = 0;
1403 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
1404 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1407 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
/* Any Tx skbs still on the ring were never completed; unmap and free. */
1409 for (i = 0; i < TX_RING_SIZE; i++) {
1410 skb = ep->tx_skbuff[i];
1411 ep->tx_skbuff[i] = NULL;
1414 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
1415 skb->len, PCI_DMA_TODEVICE);
1419 /* Green! Leave the chip in low-power mode. */
1420 outl(0x0008, ioaddr + GENCTL);
/* epic_get_stats: return the interface statistics.  If the device is up,
   first fold the chip's hardware error counters (which clear on read)
   into the software counts. */
1425 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1427 struct epic_private *ep = dev->priv;
1428 long ioaddr = dev->base_addr;
1430 if (netif_running(dev)) {
1431 /* Update the error counts. */
1432 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1433 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1434 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1440 /* Set or clear the multicast filter for this adaptor.
1441 Note that we only use exclusion around actually queueing the
1442 new frame, not around filling ep->setup_frame. This is non-deterministic
1443 when re-entered but still correct. */
1445 static void set_rx_mode(struct net_device *dev)
1447 long ioaddr = dev->base_addr;
1448 struct epic_private *ep = dev->priv;
1449 unsigned char mc_filter[8]; /* Multicast hash filter */
1452 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1453 outl(0x002C, ioaddr + RxCtrl);
1454 /* Unconditionally log net taps. */
1455 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1456 memset(mc_filter, 0xff, sizeof(mc_filter));
1457 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1458 /* There is apparently a chip bug, so the multicast filter
1459 is never enabled. */
1460 /* Too many to filter perfectly -- accept all multicasts. */
1461 memset(mc_filter, 0xff, sizeof(mc_filter));
1462 outl(0x000C, ioaddr + RxCtrl);
1463 } else if (dev->mc_count == 0) {
1464 outl(0x0004, ioaddr + RxCtrl);
1466 } else { /* Never executed, for now. */
1467 struct dev_mc_list *mclist;
1469 memset(mc_filter, 0, sizeof(mc_filter));
1470 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1471 i++, mclist = mclist->next) {
1472 unsigned int bit_nr =
1473 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1474 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1477 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1478 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1479 for (i = 0; i < 4; i++)
1480 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1481 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
/* ethtool get_drvinfo hook: report driver name, version and PCI bus
   location.  NOTE(review): plain strcpy relies on the ethtool_drvinfo
   fields being large enough for these constant strings -- a bounded copy
   would be more defensive; confirm field sizes before changing. */
1486 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1488 struct epic_private *np = dev->priv;
1490 strcpy (info->driver, DRV_NAME);
1491 strcpy (info->version, DRV_VERSION);
1492 strcpy (info->bus_info, pci_name(np->pci_dev));
/* ethtool get_settings hook: read MII link settings under the driver
   lock via the generic mii_ethtool_gset() helper. */
1495 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1497 struct epic_private *np = dev->priv;
1500 spin_lock_irq(&np->lock);
1501 rc = mii_ethtool_gset(&np->mii, cmd);
1502 spin_unlock_irq(&np->lock);
/* ethtool set_settings hook: write MII link settings under the driver
   lock via the generic mii_ethtool_sset() helper. */
1507 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1509 struct epic_private *np = dev->priv;
1512 spin_lock_irq(&np->lock);
1513 rc = mii_ethtool_sset(&np->mii, cmd);
1514 spin_unlock_irq(&np->lock);
/* ethtool nway_reset hook: restart MII autonegotiation. */
1519 static int netdev_nway_reset(struct net_device *dev)
1521 struct epic_private *np = dev->priv;
1522 return mii_nway_restart(&np->mii);
/* ethtool get_link hook: report MII link-up state. */
1525 static u32 netdev_get_link(struct net_device *dev)
1527 struct epic_private *np = dev->priv;
1528 return mii_link_ok(&np->mii);
/* ethtool get_msglevel hook (body elided from this fragment; presumably
   returns the module debug level -- confirm against full source). */
1531 static u32 netdev_get_msglevel(struct net_device *dev)
/* ethtool set_msglevel hook (body elided from this fragment; presumably
   stores the module debug level -- confirm against full source). */
1536 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/* ethtool begin hook: if the interface is down, temporarily power the
   chip up (GENCTL 0x0200, NVCTL bits 0x4800) so MII accesses work. */
1541 static int ethtool_begin(struct net_device *dev)
1543 unsigned long ioaddr = dev->base_addr;
1544 /* power-up, if interface is down */
1545 if (! netif_running(dev)) {
1546 outl(0x0200, ioaddr + GENCTL);
1547 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* ethtool complete hook: counterpart of ethtool_begin() -- if the
   interface is down, return the chip to low-power mode (GENCTL 0x0008)
   and clear the NVCTL bits set at power-up. */
1552 static void ethtool_complete(struct net_device *dev)
1554 unsigned long ioaddr = dev->base_addr;
1555 /* power-down, if interface is down */
1556 if (! netif_running(dev)) {
1557 outl(0x0008, ioaddr + GENCTL);
1558 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/* ethtool operations table wiring the hooks above into the net core. */
1562 static struct ethtool_ops netdev_ethtool_ops = {
1563 .get_drvinfo = netdev_get_drvinfo,
1564 .get_settings = netdev_get_settings,
1565 .set_settings = netdev_set_settings,
1566 .nway_reset = netdev_nway_reset,
1567 .get_link = netdev_get_link,
1568 .get_msglevel = netdev_get_msglevel,
1569 .set_msglevel = netdev_set_msglevel,
1570 .get_sg = ethtool_op_get_sg,
1571 .get_tx_csum = ethtool_op_get_tx_csum,
1572 .begin = ethtool_begin,
1573 .complete = ethtool_complete
/* netdev_ioctl: handle SIOC[GS]MII ioctls via generic_mii_ioctl() under
   the driver lock, powering the chip up around the access when the
   interface is down (same sequence as ethtool_begin/ethtool_complete). */
1576 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1578 struct epic_private *np = dev->priv;
1579 long ioaddr = dev->base_addr;
1580 struct mii_ioctl_data *data = if_mii(rq);
1583 /* power-up, if interface is down */
1584 if (! netif_running(dev)) {
1585 outl(0x0200, ioaddr + GENCTL);
1586 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1589 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1590 spin_lock_irq(&np->lock);
1591 rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1592 spin_unlock_irq(&np->lock);
1594 /* power-down, if interface is down */
1595 if (! netif_running(dev)) {
1596 outl(0x0008, ioaddr + GENCTL);
1597 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
1603 static void __devexit epic_remove_one (struct pci_dev *pdev)
1605 struct net_device *dev = pci_get_drvdata(pdev);
1606 struct epic_private *ep = dev->priv;
1608 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1609 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1610 unregister_netdev(dev);
1612 iounmap((void*) dev->base_addr);
1614 pci_release_regions(pdev);
1616 pci_disable_device(pdev);
1617 pci_set_drvdata(pdev, NULL);
1618 /* pci_power_off(pdev, -1); */
/* epic_suspend: PM suspend hook.  If the interface is up, put the chip
   into low-power mode (GENCTL 0x0008); nothing to do when it is down. */
1624 static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1626 struct net_device *dev = pci_get_drvdata(pdev);
1627 long ioaddr = dev->base_addr;
1629 if (!netif_running(dev))
1632 /* Put the chip into low-power mode. */
1633 outl(0x0008, ioaddr + GENCTL);
1634 /* pci_power_off(pdev, -1); */
/* epic_resume: PM resume hook -- counterpart of epic_suspend().  The
   restart sequence for a running interface lies on lines elided from
   this fragment. */
1639 static int epic_resume (struct pci_dev *pdev)
1641 struct net_device *dev = pci_get_drvdata(pdev);
1643 if (!netif_running(dev))
1646 /* pci_power_on(pdev); */
1650 #endif /* CONFIG_PM */
/* PCI driver descriptor: probe/remove and (under CONFIG_PM) the
   suspend/resume hooks.  (The .name initializer line is elided from
   this fragment.) */
1653 static struct pci_driver epic_driver = {
1655 .id_table = epic_pci_tbl,
1656 .probe = epic_init_one,
1657 .remove = __devexit_p(epic_remove_one),
1659 .suspend = epic_suspend,
1660 .resume = epic_resume,
1661 #endif /* CONFIG_PM */
/* epic_init: module entry point -- print the version banner and register
   the PCI driver.  The repeated KERN_INFO prefix restarts the log level
   for each of the three version strings in a single printk call. */
1665 static int __init epic_init (void)
1667 /* when a module, this is printed whether or not devices are found in probe */
1669 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
1670 version, version2, version3);
1673 return pci_module_init (&epic_driver);
/* epic_cleanup: module exit point -- unregister the PCI driver. */
1677 static void __exit epic_cleanup (void)
1679 pci_unregister_driver (&epic_driver);
/* Module entry/exit registration. */
1683 module_init(epic_init);
1684 module_exit(epic_cleanup);