1 /* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
3 Written/copyright 1994-1999 by Donald Becker.
5 This software may be used and distributed according to the terms
6 of the GNU General Public License, incorporated herein by reference.
8 The author may be reached as becker@scyld.com, or C/O
9 Scyld Computing Corporation
10 410 Severn Ave., Suite 210
/* Driver identity strings used in log messages and module metadata. */
15 #define DRV_NAME "xircom_tulip_cb"
16 #define DRV_VERSION "0.92"
17 #define DRV_RELDATE "June 27, 2006"
19 /* A few user-configurable values. */
/* "debug" is the module-parameter name; alias it to the internal symbol. */
21 #define xircom_debug debug
/* NOTE(review): two definitions of xircom_debug appear below; in the
 * original these are presumably the two arms of an #if XIRCOM_DEBUG /
 * #else block whose preprocessor lines are missing from this listing —
 * confirm against the full source. */
23 static int xircom_debug = XIRCOM_DEBUG;
25 static int xircom_debug = 1;
28 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
29 static int max_interrupt_work = 25;
32 /* Used to pass the full-duplex flag, etc. */
33 static int full_duplex[MAX_UNITS];
34 static int options[MAX_UNITS];
35 static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
37 /* Keep the ring sizes a power of two for efficiency.
38 Making the Tx ring too large decreases the effectiveness of channel
39 bonding and packet priority.
40 There are no ill effects from too-large receive rings. */
41 #define TX_RING_SIZE 16
42 #define RX_RING_SIZE 32
44 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
/* NOTE(review): duplicate rx_copybreak definitions — likely the two arms
 * of a dropped #ifdef (copy-always vs. copybreak build); verify. */
46 static int rx_copybreak = 1518;
48 static int rx_copybreak = 100;
52 Set the bus performance register.
53 Typical: Set 16 longword cache alignment, no burst limit.
54 Cache alignment bits 15:14 Burst length 13:8
55 0000 No alignment 0x00000000 unlimited 0800 8 longwords
56 4000 8 longwords 0100 1 longword 1000 16 longwords
57 8000 16 longwords 0200 2 longwords 2000 32 longwords
58 C000 32 longwords 0400 4 longwords
59 Warning: many older 486 systems are broken and require setting 0x00A04800
60 8 longword cache alignment, 8 longword burst.
61 ToDo: Non-Intel setting could be better.
/* Per-architecture CSR0 (bus mode) defaults: cache alignment + burst. */
64 #if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
65 static int csr0 = 0x01A00000 | 0xE000;
66 #elif defined(__powerpc__)
67 static int csr0 = 0x01B00000 | 0x8000;
68 #elif defined(CONFIG_SPARC)
69 static int csr0 = 0x01B00080 | 0x8000;
70 #elif defined(__i386__)
71 static int csr0 = 0x01A00000 | 0x8000;
73 #warning Processor architecture undefined!
74 static int csr0 = 0x00A00000 | 0x4800;
77 /* Operational parameters that usually are not changed. */
78 /* Time in jiffies before concluding the transmitter is hung. */
79 #define TX_TIMEOUT (4 * HZ)
80 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
81 #define PKT_SETUP_SZ 192 /* Size of the setup frame */
/* PCI config-space offset of the card's power-management register. */
84 #define PCI_POWERMGMT 0x40
86 #include <linux/module.h>
87 #include <linux/moduleparam.h>
88 #include <linux/kernel.h>
89 #include <linux/pci.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/delay.h>
93 #include <linux/init.h>
94 #include <linux/mii.h>
95 #include <linux/ethtool.h>
96 #include <linux/crc32.h>
99 #include <asm/processor.h> /* Processor type for cache alignment. */
100 #include <asm/uaccess.h>
103 /* These identify the driver base version and may not be removed. */
104 static char version[] __devinitdata =
105 KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
106 KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";
108 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
109 MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
110 MODULE_LICENSE("GPL v2");
111 MODULE_VERSION(DRV_VERSION);
/* Module parameters; "debug" maps onto xircom_debug via the alias above. */
113 module_param(debug, int, 0);
114 module_param(max_interrupt_work, int, 0);
115 module_param(rx_copybreak, int, 0);
116 module_param(csr0, int, 0);
/* Per-board option arrays, indexed by probe order (max MAX_UNITS). */
118 module_param_array(options, int, NULL, 0);
119 module_param_array(full_duplex, int, NULL, 0);
/* Convert a relative delay into an absolute jiffies deadline. */
121 #define RUN_AT(x) (jiffies + (x))
126 I. Board Compatibility
128 This device driver was forked from the driver for the DECchip "Tulip",
129 Digital's single-chip ethernet controllers for PCI. It supports Xircom's
130 almost-Tulip-compatible CBE-100 CardBus adapters.
132 II. Board-specific settings
134 PCI bus devices are configured by the system at boot time, so no jumpers
135 need to be set on the board. The system BIOS preferably should assign the
136 PCI INTA signal to an otherwise unused system IRQ line.
138 III. Driver operation
142 The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
143 This driver uses statically allocated rings of Rx and Tx descriptors, set at
144 compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
145 for the Rx ring buffers at open() time and passes the skb->data field to the
146 Xircom as receive data buffers. When an incoming frame is less than
147 RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
148 copied to the new skbuff. When the incoming frame is larger, the skbuff is
149 passed directly up the protocol stack and replaced by a newly allocated
152 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
153 using a full-sized skbuff for small frames vs. the copying costs of larger
154 frames. For small frames the copying cost is negligible (esp. considering
155 that we are pre-loading the cache with immediately useful header
156 information). For large frames the copying cost is non-trivial, and the
157 larger copy might flush the cache of useful data. A subtle aspect of this
158 choice is that the Xircom only receives into longword aligned buffers, thus
159 the IP header at offset 14 isn't longword aligned for further processing.
160 Copied frames are put into the new skbuff at an offset of "+2", thus copying
161 has the beneficial effect of aligning the IP header and preloading the
164 IIIC. Synchronization
165 The driver runs as two independent, single-threaded flows of control. One
166 is the send-packet routine, which enforces single-threaded use by the
167 dev->tbusy flag. The other thread is the interrupt handler, which is single
168 threaded by the hardware and other software.
170 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
171 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
172 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
173 the 'tp->tx_full' flag.
175 The interrupt handler has exclusive control over the Rx ring and records stats
176 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
177 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
178 stats.) After reaping the stats, it marks the queue entry as empty by setting
179 the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
180 tx_full and tbusy flags.
186 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
187 http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
188 http://www.national.com/pf/DP/DP83840A.html
194 /* A full-duplex map for media types. */
/* Capability flag bits used in the media_cap[] table below. */
196 MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
/* Capability flags per media-type index (dev->if_port values 0-15). */
198 static const char media_cap[] =
199 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20 };
201 /* Offsets to the Command and Status Registers, "CSRs". All accesses
202 must be longword instructions and quadword aligned. */
203 enum xircom_offsets {
204 CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
205 CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
206 CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };
208 /* The bits in the CSR5 status registers, mostly interrupt sources. */
210 LinkChange=0x08000000,
211 NormalIntr=0x10000, NormalIntrMask=0x00014045,
212 AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
213 ReservedIntrMask=0xe0001a18,
214 EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
215 EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
216 TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
/* CSR0 (bus mode) control bits. */
219 enum csr0_control_bits {
220 EnableMWI=0x01000000, EnableMRL=0x00800000,
221 EnableMRM=0x00200000, EqualBusPrio=0x02,
/* CSR6 (operation mode) control bits; the Reserved*Mask values encode
 * which bits always read 0/1 on Xircom silicon (see outl_CSR6). */
225 enum csr6_control_bits {
226 ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
227 HashFilterBit=0x01, FullDuplexBit=0x0200,
228 TxThresh10=0x400000, TxStoreForw=0x200000,
229 TxThreshMask=0xc000, TxThreshShift=14,
230 EnableTx=0x2000, EnableRx=0x02,
231 ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
232 EnableTxRx=(EnableTx | EnableRx),
/* Chip feature flags recorded in xircom_chip_table.flags. */
237 HAS_MII=1, HAS_ACPI=2,
/* Per-chip-model description: name, enabled interrupts, feature flags. */
239 static struct xircom_chip_table {
241 int valid_intrs; /* CSR7 interrupt enable settings */
244 { "Xircom Cardbus Adapter",
245 LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
246 RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
247 HAS_MII | HAS_ACPI, },
250 /* This matches the table above. */
256 /* The Xircom Rx and Tx buffer descriptors. */
/* Hardware descriptor layout — field order and width are fixed by the
 * chip; the device reads these via DMA (addresses set via virt_to_bus). */
257 struct xircom_rx_desc {
260 u32 buffer1, buffer2;
263 struct xircom_tx_desc {
266 u32 buffer1, buffer2; /* We use only buffer 1. */
/* Tx descriptor word 0: ownership and completion/error status bits. */
269 enum tx_desc0_status_bits {
270 Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
271 Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
/* Tx descriptor word 1: control bits (segment markers, setup frame, wrap). */
273 enum tx_desc1_status_bits {
274 Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
275 Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
276 Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
277 Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
/* Rx descriptor word 0: ownership and receive status/error bits. */
279 enum rx_desc0_status_bits {
280 Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
281 Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
282 Rx0HugeFrame=0x80, Rx0CRCError=0x02,
283 Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
/* Rx descriptor word 1: ring-wrap / chained-descriptor control bits. */
285 enum rx_desc1_status_bits {
286 Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
/* Per-device driver state, allocated with the net_device (netdev_priv).
 * Holds the static descriptor rings, their skbuff shadows, and cached
 * register/media settings. */
289 struct xircom_private {
290 struct xircom_rx_desc rx_ring[RX_RING_SIZE];
291 struct xircom_tx_desc tx_ring[TX_RING_SIZE];
292 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
293 struct sk_buff* tx_skbuff[TX_RING_SIZE];
295 /* The X3201-3 requires 4-byte aligned tx bufs */
296 struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
298 /* The addresses of receive-in-place skbuffs. */
299 struct sk_buff* rx_skbuff[RX_RING_SIZE];
300 u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */
302 struct net_device_stats stats;
303 unsigned int cur_rx, cur_tx; /* The next free ring entry */
304 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
305 unsigned int tx_full:1; /* The Tx queue is full. */
306 unsigned int speed100:1; /* Link is running at 100 Mbit. */
307 unsigned int full_duplex:1; /* Full-duplex operation requested. */
308 unsigned int autoneg:1; /* Autonegotiation enabled. */
309 unsigned int default_port:4; /* Last dev->if_port value. */
311 unsigned int csr0; /* CSR0 setting. */
312 unsigned int csr6; /* Current CSR6 control settings. */
313 u16 to_advertise; /* NWay capabilities advertised. */
315 signed char phys[4], mii_cnt; /* MII device addresses. */
317 struct pci_dev *pdev;
/* Forward declarations for the driver's static functions and ethtool ops. */
321 static int mdio_read(struct net_device *dev, int phy_id, int location);
322 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
323 static void xircom_up(struct net_device *dev);
324 static void xircom_down(struct net_device *dev);
325 static int xircom_open(struct net_device *dev);
326 static void xircom_tx_timeout(struct net_device *dev);
327 static void xircom_init_ring(struct net_device *dev);
328 static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
329 static int xircom_rx(struct net_device *dev);
330 static void xircom_media_change(struct net_device *dev);
331 static irqreturn_t xircom_interrupt(int irq, void *dev_instance);
332 static int xircom_close(struct net_device *dev);
333 static struct net_device_stats *xircom_get_stats(struct net_device *dev);
334 static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
335 static void set_rx_mode(struct net_device *dev);
336 static void check_duplex(struct net_device *dev);
337 static const struct ethtool_ops ops;
340 /* The Xircom cards are picky about when certain bits in CSR6 can be
341 manipulated. Keith Owens <kaos@ocs.com.au>. */
/*
 * outl_CSR6 - safely write CSR6 (operation mode register).
 * The Xircom chips only tolerate changes to the "strict" mode bits
 * (thresholds, store-and-forward, Tx/Rx enables, duplex) while both the
 * transmitter and receiver are stopped, so this routine stops them,
 * polls CSR5 until both state machines report stopped/suspended, writes
 * the new value, then restores the previous Tx/Rx enables.
 * NOTE(review): several original lines (opening brace, flags/save_flags
 * setup, the retry-loop header, early returns) are missing from this
 * listing; the control flow shown here is incomplete.
 */
342 static void outl_CSR6(u32 newcsr6, long ioaddr)
344 const int strict_bits =
345 TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
346 int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
350 /* mask out the reserved bits that always read 0 on the Xircom cards */
351 newcsr6 &= ~ReservedZeroMask;
352 /* or in the reserved bits that always read 1 */
353 newcsr6 |= ReservedOneMask;
354 currcsr6 = inl(ioaddr + CSR6);
/* Fast path: no strict bit changes, or Tx/Rx already fully disabled. */
355 if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
356 ((currcsr6 & ~EnableTxRx) == 0)) {
357 outl(newcsr6, ioaddr + CSR6); /* safe */
358 restore_flags(flags);
361 /* make sure the transmitter and receiver are stopped first */
362 currcsr6 &= ~EnableTxRx;
/* Poll CSR5 Tx (bits 22:20) and Rx (bits 19:17) state machines. */
364 csr5 = inl(ioaddr + CSR5);
365 if (csr5 == 0xffffffff)
366 break; /* cannot read csr5, card removed? */
367 csr5_22_20 = csr5 & 0x700000;
368 csr5_19_17 = csr5 & 0x0e0000;
369 if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
370 (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
371 break; /* both are stopped or suspended */
/* Timed out waiting for the state machines to quiesce. */
373 printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts,"
374 "csr5=0x%08x\n", csr5);
375 outl(newcsr6, ioaddr + CSR6); /* unsafe but do it anyway */
376 restore_flags(flags);
/* Write the stopped value first, then the requested value. */
379 outl(currcsr6, ioaddr + CSR6);
382 /* now it is safe to change csr6 */
383 outl(newcsr6, ioaddr + CSR6);
384 restore_flags(flags);
/*
 * read_mac_address - extract the station address from the card's CIS.
 * Walks the CardBus CIS via the boot-ROM interface (CSR9/CSR10) parsing
 * tuples until it finds the LAN node ID tuple (0x22 / data_id 0x04,
 * six data bytes) and copies those bytes into dev->dev_addr.
 * NOTE(review): this listing is missing the opening brace, local
 * declarations of i/j, and some closing braces.
 */
388 static void __devinit read_mac_address(struct net_device *dev)
390 long ioaddr = dev->base_addr;
392 unsigned char tuple, link, data_id, data_count;
394 /* Xircom has its address stored in the CIS;
395 * we access it through the boot rom interface for now
396 * this might not work, as the CIS is not parsed but I
397 * (danilo) use the offset I found on my card's CIS !!!
399 * Doug Ledford: I changed this routine around so that it
400 * walks the CIS memory space, parsing the config items, and
401 * finds the proper lan_node_id tuple and uses the data
404 outl(1 << 12, ioaddr + CSR9); /* enable boot rom access */
/* Each iteration reads one tuple header: type, link, data id, count. */
405 for (i = 0x100; i < 0x1f7; i += link+2) {
406 outl(i, ioaddr + CSR10);
407 tuple = inl(ioaddr + CSR9) & 0xff;
408 outl(i + 1, ioaddr + CSR10);
409 link = inl(ioaddr + CSR9) & 0xff;
410 outl(i + 2, ioaddr + CSR10);
411 data_id = inl(ioaddr + CSR9) & 0xff;
412 outl(i + 3, ioaddr + CSR10);
413 data_count = inl(ioaddr + CSR9) & 0xff;
414 if ( (tuple == 0x22) &&
415 (data_id == 0x04) && (data_count == 0x06) ) {
417 * This is it. We have the data we want.
/* Copy the six MAC address bytes following the tuple header. */
419 for (j = 0; j < 6; j++) {
420 outl(i + j + 4, ioaddr + CSR10);
421 dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
/* A zero link byte terminates the tuple chain. */
424 } else if (link == 0) {
432 * locate the MII interfaces and initialize them.
433 * we disable full-duplex modes here,
434 * because we don't know how to handle them.
436 static void find_mii_transceivers(struct net_device *dev)
438 struct xircom_private *tp = netdev_priv(dev);
/* If a MII media type was forced via the options[] parameter, derive
 * the advertisement mask from it; otherwise advertise half-duplex only
 * (full duplex is deliberately not advertised — see comment above). */
441 if (media_cap[tp->default_port] & MediaIsMII) {
442 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
443 tp->to_advertise = media2advert[tp->default_port - 9];
446 /*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
447 /*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;
449 /* Find the connected MII xcvrs.
450 Doing this in open() would allow detecting external xcvrs later,
451 but takes much time. */
452 for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
453 int mii_status = mdio_read(dev, phy, MII_BMSR);
/* Accept a PHY that reports any usable 10/100 capability. */
454 if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
455 ((mii_status & BMSR_100BASE4) == 0 &&
456 (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
457 int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
458 int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
/* Intersect the PHY's capabilities (BMSR bits >> 6 line up with
 * the ADVERTISE register layout) with what we want to advertise. */
459 int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
460 tp->phys[phy_idx] = phy;
461 tp->advertising[phy_idx++] = reg4;
462 printk(KERN_INFO "%s: MII transceiver #%d "
463 "config %4.4x status %4.4x advertising %4.4x.\n",
464 dev->name, phy, mii_reg0, mii_status, mii_advert);
467 tp->mii_cnt = phy_idx;
469 printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
477 * To quote Arjan van de Ven:
478 * transceiver_voodoo() enables the external UTP plug thingy.
479 * it's called voodoo as I stole this code and cannot cross-reference
480 * it with the specification.
481 * Actually it seems to go like this:
482 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
483 * so any prior MII settings are lost.
484 * - GPIO0 enables the TP port so the MII can talk to the network.
485 * - a software reset will reset both GPIO pins.
486 * I also moved the software reset here, because doing it in xircom_up()
487 * required enabling the GPIO pins each time, which reset the MII each time.
488 * Thus we couldn't control the MII -- which sucks because we don't know
489 * how to handle full-duplex modes so we *must* disable them.
491 static void transceiver_voodoo(struct net_device *dev)
493 struct xircom_private *tp = netdev_priv(dev);
494 long ioaddr = dev->base_addr;
496 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
497 outl(SoftwareReset, ioaddr + CSR0);
500 /* Deassert reset. */
501 outl(tp->csr0, ioaddr + CSR0);
503 /* Reset the xcvr interface and turn on heartbeat. */
504 outl(0x0008, ioaddr + CSR15);
505 udelay(5); /* The delays are Xircom-recommended to give the
506 * chipset time to reset the actual hardware
/* Magic GPIO sequences (see comment above): enable MII, then TP port. */
509 outl(0xa8050000, ioaddr + CSR15);
511 outl(0xa00f0000, ioaddr + CSR15);
/* Clear CSR6 — full duplex stays disabled (unsupported, see above). */
514 outl_CSR6(0, ioaddr);
515 //outl_CSR6(FullDuplexBit, ioaddr);
/*
 * xircom_init_one - PCI probe callback: set up one CBE-100 adapter.
 * Enables the PCI device, allocates and fills in the net_device,
 * wakes the chip out of sleep mode, stops its Tx/Rx engines, runs the
 * transceiver setup, reads the MAC address from the CIS, registers the
 * netdev and probes for MII transceivers.
 * Returns 0 on success or a negative errno (error paths are partially
 * missing from this listing).
 */
519 static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
521 struct net_device *dev;
522 struct xircom_private *tp;
/* board_idx counts probed boards to index the options[]/mtu[] arrays. */
523 static int board_idx = -1;
524 int chip_idx = id->driver_data;
528 /* when built into the kernel, we only print version if device is found */
530 static int printed_version;
531 if (!printed_version++)
535 //printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));
539 if (pci_enable_device(pdev))
542 pci_set_master(pdev);
544 ioaddr = pci_resource_start(pdev, 0);
545 dev = alloc_etherdev(sizeof(*tp));
547 printk (KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx);
550 SET_MODULE_OWNER(dev);
551 SET_NETDEV_DEV(dev, &pdev->dev);
553 dev->base_addr = ioaddr;
554 dev->irq = pdev->irq;
556 if (pci_request_regions(pdev, dev->name)) {
557 printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
558 goto err_out_free_netdev;
561 /* Bring the chip out of sleep mode.
562 Caution: Snooze mode does not work with some boards! */
563 if (xircom_tbl[chip_idx].flags & HAS_ACPI)
564 pci_write_config_dword(pdev, PCI_POWERMGMT, 0);
566 /* Stop the chip's Tx and Rx processes. */
567 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
568 /* Clear the missed-packet counter. */
569 (volatile int)inl(ioaddr + CSR8);
571 tp = netdev_priv(dev);
573 spin_lock_init(&tp->lock);
575 tp->chip_id = chip_idx;
576 /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
577 /* XXX: is this necessary for Xircom? */
578 tp->csr0 = csr0 & ~EnableMWI;
580 pci_set_drvdata(pdev, dev);
582 /* The lower four bits are the media type. */
583 if (board_idx >= 0 && board_idx < MAX_UNITS) {
584 tp->default_port = options[board_idx] & 15;
585 if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
587 if (mtu[board_idx] > 0)
588 dev->mtu = mtu[board_idx];
/* Fallback: honour a media type passed via dev->mem_start. */
591 tp->default_port = dev->mem_start;
592 if (tp->default_port) {
593 if (media_cap[tp->default_port] & MediaAlwaysFD)
602 /* The Xircom-specific entries in the device structure. */
603 dev->open = &xircom_open;
604 dev->hard_start_xmit = &xircom_start_xmit;
605 dev->stop = &xircom_close;
606 dev->get_stats = &xircom_get_stats;
607 dev->do_ioctl = &xircom_ioctl;
608 #ifdef HAVE_MULTICAST
609 dev->set_multicast_list = &set_rx_mode;
611 dev->tx_timeout = xircom_tx_timeout;
612 dev->watchdog_timeo = TX_TIMEOUT;
613 SET_ETHTOOL_OPS(dev, &ops);
615 transceiver_voodoo(dev);
617 read_mac_address(dev);
619 if (register_netdev(dev))
620 goto err_out_cleardev;
622 printk(KERN_INFO "%s: %s rev %d at %#3lx,",
623 dev->name, xircom_tbl[chip_idx].chip_name, pdev->revision, ioaddr);
624 for (i = 0; i < 6; i++)
625 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
626 printk(", IRQ %d.\n", dev->irq);
628 if (xircom_tbl[chip_idx].flags & HAS_MII) {
629 find_mii_transceivers(dev);
/* Error unwind: undo drvdata and region reservation. */
636 pci_set_drvdata(pdev, NULL);
637 pci_release_regions(pdev);
644 /* MII transceiver control section.
645 Read and write the MII registers using software-generated serial
646 MDIO protocol. See the MII specifications or DP83840A data sheet
649 /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
650 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
651 "overclocking" issues or future 66Mhz PCI. */
/* A dummy read of CSR9 serves as the inter-edge delay. */
652 #define mdio_delay() inl(mdio_addr)
654 /* Read and write the MII registers using software-generated serial
655 MDIO protocol. It is just different enough from the EEPROM protocol
656 to not share code. The maximum data clock rate is 2.5 Mhz. */
/* CSR9 bit positions used to bit-bang the MDIO wire protocol. */
657 #define MDIO_SHIFT_CLK 0x10000
658 #define MDIO_DATA_WRITE0 0x00000
659 #define MDIO_DATA_WRITE1 0x20000
660 #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
661 #define MDIO_ENB_IN 0x40000
662 #define MDIO_DATA_READ 0x80000
/*
 * mdio_read - bit-bang one MII register read over CSR9.
 * Sends the 32-bit sync preamble, shifts out the 16-bit read command
 * (start/op/phy/register), then clocks in the turnaround + 16 data bits.
 * Returns the 16-bit register value.
 * NOTE(review): the opening brace, i/retval declarations, mdio_delay()
 * calls and loop closing braces are missing from this listing.
 */
664 static int mdio_read(struct net_device *dev, int phy_id, int location)
667 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
669 long ioaddr = dev->base_addr;
670 long mdio_addr = ioaddr + CSR9;
672 /* Establish sync by sending at least 32 logic ones. */
673 for (i = 32; i >= 0; i--) {
674 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
676 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
679 /* Shift the read command bits out. */
680 for (i = 15; i >= 0; i--) {
681 int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
683 outl(MDIO_ENB | dataval, mdio_addr);
685 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
688 /* Read the two transition, 16 data, and wire-idle bits. */
689 for (i = 19; i > 0; i--) {
690 outl(MDIO_ENB_IN, mdio_addr);
692 retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
693 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
/* Drop the trailing idle bit and mask to 16 bits. */
696 return (retval>>1) & 0xffff;
/*
 * mdio_write - bit-bang one MII register write over CSR9.
 * Sends the sync preamble, then shifts out the full 32-bit write frame
 * (start/op/phy/register/turnaround + 16 data bits), and finally clocks
 * two idle cycles to release the bus.
 * NOTE(review): opening brace, i declaration, mdio_delay() calls and
 * closing braces are missing from this listing.
 */
700 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
703 int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
704 long ioaddr = dev->base_addr;
705 long mdio_addr = ioaddr + CSR9;
707 /* Establish sync by sending 32 logic ones. */
708 for (i = 32; i >= 0; i--) {
709 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
711 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
714 /* Shift the command bits out. */
715 for (i = 31; i >= 0; i--) {
716 int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
717 outl(MDIO_ENB | dataval, mdio_addr);
719 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
722 /* Clear out extra bits. */
723 for (i = 2; i > 0; i--) {
724 outl(MDIO_ENB_IN, mdio_addr);
726 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
/*
 * xircom_up - bring the interface to the running state.
 * Initializes the descriptor rings, programs their addresses into
 * CSR3/CSR4, starts the Tx engine (to process the setup frame), enables
 * interrupts, starts Tx+Rx, and checks the current link state.
 * NOTE(review): the return type line, opening brace, i declaration and
 * some intermediate statements are missing from this listing.
 */
734 xircom_up(struct net_device *dev)
736 struct xircom_private *tp = netdev_priv(dev);
737 long ioaddr = dev->base_addr;
740 xircom_init_ring(dev);
741 /* Clear the tx ring */
742 for (i = 0; i < TX_RING_SIZE; i++) {
743 tp->tx_skbuff[i] = NULL;
744 tp->tx_ring[i].status = 0;
747 if (xircom_debug > 1)
748 printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);
/* Point the chip at the (physically contiguous, static) rings. */
750 outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
751 outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
753 tp->saved_if_port = dev->if_port;
754 if (dev->if_port == 0)
755 dev->if_port = tp->default_port;
757 tp->csr6 = TxThresh10 /*| FullDuplexBit*/; /* XXX: why 10 and not 100? */
761 /* Start the chip's Tx to process setup frame. */
762 outl_CSR6(tp->csr6, ioaddr);
763 outl_CSR6(tp->csr6 | EnableTx, ioaddr);
765 /* Acknowledge all outstanding interrupts sources */
766 outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
767 /* Enable interrupts by setting the interrupt mask. */
768 outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
770 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
/* Poke CSR2 (Rx poll demand) to start receive polling. */
772 outl(0, ioaddr + CSR2);
774 /* Tell the net layer we're ready */
775 netif_start_queue (dev);
777 /* Check current media state */
778 xircom_media_change(dev);
780 if (xircom_debug > 2) {
781 printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
782 dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
/*
 * xircom_open - ndo_open handler: grab the (shared) IRQ, then bring the
 * interface up via xircom_up(). Returns 0 on success.
 * NOTE(review): return type, braces, the xircom_up() call and the return
 * statements are missing from this listing.
 */
789 xircom_open(struct net_device *dev)
791 struct xircom_private *tp = netdev_priv(dev);
793 if (request_irq(dev->irq, &xircom_interrupt, IRQF_SHARED, dev->name, dev))
/*
 * xircom_tx_timeout - watchdog callback when a Tx hangs for TX_TIMEOUT.
 * On MII media the media monitor is expected to recover; otherwise the
 * (optional, compiled-out) debug dump prints the rings, and the chip's
 * Tx/Rx engines are restarted with an immediate transmit demand.
 */
803 static void xircom_tx_timeout(struct net_device *dev)
805 struct xircom_private *tp = netdev_priv(dev);
806 long ioaddr = dev->base_addr;
808 if (media_cap[dev->if_port] & MediaIsMII) {
809 /* Do nothing -- the media monitor should handle this. */
810 if (xircom_debug > 1)
811 printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
/* Ring-dump diagnostics, normally compiled out. */
815 #if defined(way_too_many_messages)
816 if (xircom_debug > 3) {
818 for (i = 0; i < RX_RING_SIZE; i++) {
819 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
821 printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
822 "%2.2x %2.2x %2.2x.\n",
823 i, (unsigned int)tp->rx_ring[i].status,
824 (unsigned int)tp->rx_ring[i].length,
825 (unsigned int)tp->rx_ring[i].buffer1,
826 (unsigned int)tp->rx_ring[i].buffer2,
827 buf[0], buf[1], buf[2]);
828 for (j = 0; buf[j] != 0xee && j < 1600; j++)
829 if (j < 100) printk(" %2.2x", buf[j]);
830 printk(" j=%d.\n", j);
832 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
833 for (i = 0; i < RX_RING_SIZE; i++)
834 printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
835 printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
836 for (i = 0; i < TX_RING_SIZE; i++)
837 printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
842 /* Stop and restart the chip's Tx/Rx processes . */
843 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
844 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
845 /* Trigger an immediate transmit demand. */
846 outl(0, ioaddr + CSR1);
848 dev->trans_start = jiffies;
849 netif_wake_queue (dev);
850 tp->stats.tx_errors++;
854 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Builds the chained descriptor lists, allocates one skbuff per Rx slot
 * (handing ownership to the chip), and — on the X3201-3 — pre-allocates
 * the 4-byte-aligned Tx bounce buffers. */
855 static void xircom_init_ring(struct net_device *dev)
857 struct xircom_private *tp = netdev_priv(dev);
861 tp->cur_rx = tp->cur_tx = 0;
862 tp->dirty_rx = tp->dirty_tx = 0;
864 for (i = 0; i < RX_RING_SIZE; i++) {
865 tp->rx_ring[i].status = 0;
866 tp->rx_ring[i].length = PKT_BUF_SZ;
/* buffer2 chains each descriptor to the next one. */
867 tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
868 tp->rx_skbuff[i] = NULL;
870 /* Mark the last entry as wrapping the ring. */
871 tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
872 tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);
874 for (i = 0; i < RX_RING_SIZE; i++) {
875 /* Note the receive buffer must be longword aligned.
876 dev_alloc_skb() provides 16 byte alignment. But do *not*
877 use skb_reserve() to align the IP header! */
878 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
879 tp->rx_skbuff[i] = skb;
882 skb->dev = dev; /* Mark as being used by this device. */
883 tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */
884 tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
/* dirty_rx trails cur_rx by the number of unfilled slots. */
886 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
888 /* The Tx buffer descriptor is filled in as needed, but we
889 do need to clear the ownership bit. */
890 for (i = 0; i < TX_RING_SIZE; i++) {
891 tp->tx_skbuff[i] = NULL;
892 tp->tx_ring[i].status = 0;
893 tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
894 if (tp->chip_id == X3201_3)
895 tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
897 tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
/*
 * xircom_start_xmit - hard_start_xmit handler: queue one skb for Tx.
 * Fills the next Tx descriptor (copying into the aligned bounce buffer
 * on the X3201-3, which requires 4-byte-aligned Tx buffers), picks the
 * completion-interrupt policy from ring occupancy, hands the descriptor
 * to the chip and triggers a transmit poll.
 * NOTE(review): return type line, opening brace, entry/flag declarations
 * and the spinlock/queue-state logic around L589-L590 are missing from
 * this listing.
 */
902 xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
904 struct xircom_private *tp = netdev_priv(dev);
908 /* Caution: the write order is important here, set the base address
909 with the "ownership" bits last. */
911 /* Calculate the next Tx descriptor entry. */
912 entry = tp->cur_tx % TX_RING_SIZE;
914 tp->tx_skbuff[entry] = skb;
915 if (tp->chip_id == X3201_3) {
916 skb_copy_from_linear_data(skb,
917 tp->tx_aligned_skbuff[entry]->data,
919 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
921 tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
/* Interrupt mitigation: only request a Tx-done interrupt when the ring
 * is half full or nearly exhausted. */
923 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
924 flag = Tx1WholePkt; /* No interrupt */
925 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
926 flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
927 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
928 flag = Tx1WholePkt; /* No Tx-done intr. */
930 /* Leave room for set_rx_mode() to fill entries. */
931 flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
934 if (entry == TX_RING_SIZE - 1)
935 flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;
937 tp->tx_ring[entry].length = skb->len | flag;
938 tp->tx_ring[entry].status = Tx0DescOwned; /* Pass ownership to the chip. */
941 netif_stop_queue (dev);
943 netif_wake_queue (dev);
945 /* Trigger an immediate transmit demand. */
946 outl(0, dev->base_addr + CSR1);
948 dev->trans_start = jiffies;
/*
 * xircom_media_change - re-read the PHY and sync carrier/duplex state.
 * Reads BMCR/BMSR twice (the first reads clear latched status), decides
 * speed/duplex from the autonegotiation result (ADVERTISE & LPA) or the
 * forced BMCR bits, updates carrier state, and rewrites CSR6's duplex
 * bit through outl_CSR6() when it changed.
 * NOTE(review): several original lines (tp->speed100/full_duplex
 * assignments, closing braces) are missing from this listing.
 */
954 static void xircom_media_change(struct net_device *dev)
956 struct xircom_private *tp = netdev_priv(dev);
957 long ioaddr = dev->base_addr;
958 u16 reg0, reg1, reg4, reg5;
959 u32 csr6 = inl(ioaddr + CSR6), newcsr6;
961 /* reset status first */
962 mdio_read(dev, tp->phys[0], MII_BMCR);
963 mdio_read(dev, tp->phys[0], MII_BMSR);
965 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
966 reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);
968 if (reg1 & BMSR_LSTATUS) {
970 if (reg0 & BMCR_ANENABLE) {
971 /* autonegotiation is enabled */
972 reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
973 reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
/* Highest common denominator of our advertisement and the link partner. */
974 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
977 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
980 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
988 /* autonegotiation is disabled */
989 if (reg0 & BMCR_SPEED100)
993 if (reg0 & BMCR_FULLDPLX)
998 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1000 tp->speed100 ? "100" : "10",
1001 tp->full_duplex ? "full" : "half");
1002 netif_carrier_on(dev);
1003 newcsr6 = csr6 & ~FullDuplexBit;
1004 if (tp->full_duplex)
1005 newcsr6 |= FullDuplexBit;
1006 if (newcsr6 != csr6)
/* BUGFIX: outl_CSR6() takes the I/O *base* and adds the CSR6 offset
 * itself; the old "ioaddr + CSR6" argument made it write the new mode
 * value to the wrong register (base + 2*CSR6, i.e. CSR12). */
1007 outl_CSR6(newcsr6, ioaddr);
1009 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1010 netif_carrier_off(dev);
/*
 * check_duplex - reset the PHY and program the requested link mode.
 * Issues a BMCR reset, waits for it to self-clear, writes our
 * advertisement mask, then either (re)starts autonegotiation or forces
 * the speed/duplex bits from tp->speed100 / tp->full_duplex.
 * NOTE(review): the opening brace, reg0 declaration and the if/else
 * lines selecting autoneg vs. forced mode are missing from this listing.
 */
1015 static void check_duplex(struct net_device *dev)
1017 struct xircom_private *tp = netdev_priv(dev);
1020 mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
/* BMCR_RESET self-clears when the PHY reset completes; busy-wait. */
1022 while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);
1024 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
1025 mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);
/* Autonegotiation path: clear forced bits, restart negotiation. */
1028 reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
1029 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
/* Forced path: disable autoneg and set speed/duplex explicitly. */
1031 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1033 reg0 |= BMCR_SPEED100;
1034 if (tp->full_duplex)
1035 reg0 |= BMCR_FULLDPLX;
1036 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1038 tp->speed100 ? "100" : "10",
1040 tp->full_duplex ? "full" : "half");
1041 mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
1045 /* The interrupt handler does all of the Rx thread work and cleans up
1046 after the Tx thread. */
/*
 * xircom_interrupt - ISR: drains Rx, reaps completed Tx descriptors,
 * and recovers from abnormal conditions, bounded by max_interrupt_work
 * events per invocation.  Serialized against the rest of the driver
 * with tp->lock.
 */
1047 static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
1049 struct net_device *dev = dev_instance;
1050 struct xircom_private *tp = netdev_priv(dev);
1051 long ioaddr = dev->base_addr;
1052 int csr5, work_budget = max_interrupt_work;
1055 spin_lock (&tp->lock);
/* CSR5 is the chip's status register; read then ack what we saw. */
1058 csr5 = inl(ioaddr + CSR5);
1059 /* Acknowledge all of the current interrupt sources ASAP. */
1060 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
1062 if (xircom_debug > 4)
1063 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
1064 dev->name, csr5, inl(dev->base_addr + CSR5));
/* Reads of a removed CardBus card return all-ones. */
1066 if (csr5 == 0xffffffff)
1067 break; /* all bits set, assume PCMCIA card removed */
1069 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
/* Receive work: each received/buffer-exhausted event costs budget. */
1074 if (csr5 & (RxIntr | RxNoBuf))
1075 work_budget -= xircom_rx(dev);
/* Transmit completion: walk dirty_tx..cur_tx reaping done descriptors. */
1077 if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
1078 unsigned int dirty_tx;
1080 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
1082 int entry = dirty_tx % TX_RING_SIZE;
1083 int status = tp->tx_ring[entry].status;
1086 break; /* It still hasn't been Txed */
1087 /* Check for Rx filter setup frames. */
/* A NULL skb marks a setup/dummy descriptor -- nothing to free. */
1088 if (tp->tx_skbuff[entry] == NULL)
1091 if (status & Tx0DescError) {
1092 /* There was an major error, log it. */
1093 #ifndef final_version
1094 if (xircom_debug > 1)
1095 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
/* Classify the error bits into net_device_stats counters. */
1098 tp->stats.tx_errors++;
1099 if (status & Tx0ManyColl) {
1100 tp->stats.tx_aborted_errors++;
1102 if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
1103 if (status & Tx0LateColl) tp->stats.tx_window_errors++;
1104 if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
/* Success path: account bytes (low 11 bits of length) and
   the collision count packed in status bits 3..6. */
1106 tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
1107 tp->stats.collisions += (status >> 3) & 15;
1108 tp->stats.tx_packets++;
1111 /* Free the original skb. */
1112 dev_kfree_skb_irq(tp->tx_skbuff[entry]);
1113 tp->tx_skbuff[entry] = NULL;
1116 #ifndef final_version
/* Sanity check: dirty pointer must never lag more than a full ring. */
1117 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
1118 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1119 dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
1120 dirty_tx += TX_RING_SIZE;
/* Ring drained below the high-water mark: let the stack queue again. */
1125 tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
1126 /* The ring is no longer full */
1130 netif_stop_queue (dev);
1132 netif_wake_queue (dev);
1134 tp->dirty_tx = dirty_tx;
/* Transmitter died: restart it by toggling CSR6 enable bits. */
1135 if (csr5 & TxDied) {
1136 if (xircom_debug > 2)
1137 printk(KERN_WARNING "%s: The transmitter stopped."
1138 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
1139 dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
1140 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1141 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1146 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
1147 if (csr5 & LinkChange)
1148 xircom_media_change(dev);
/* FIFO underflow: raise the Tx threshold until saturated, then
   fall back to store-and-forward, and restart the transmitter. */
1149 if (csr5 & TxFIFOUnderflow) {
1150 if ((tp->csr6 & TxThreshMask) != TxThreshMask)
1151 tp->csr6 += (1 << TxThreshShift); /* Bump up the Tx threshold */
1153 tp->csr6 |= TxStoreForw; /* Store-n-forward. */
1154 /* Restart the transmit process. */
1155 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1156 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1158 if (csr5 & RxDied) { /* Missed a Rx frame. */
1159 tp->stats.rx_errors++;
/* CSR8 holds the chip's missed-frame counter (low 16 bits). */
1160 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1161 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1163 /* Clear all error sources, included undocumented ones! */
1164 outl(0x0800f7ba, ioaddr + CSR5);
/* Budget exhausted: ack everything and bail to avoid IRQ livelock. */
1166 if (--work_budget < 0) {
1167 if (xircom_debug > 1)
1168 printk(KERN_WARNING "%s: Too much work during an interrupt, "
1169 "csr5=0x%8.8x.\n", dev->name, csr5);
1170 /* Acknowledge all interrupt sources. */
1171 outl(0x8001ffff, ioaddr + CSR5);
1176 if (xircom_debug > 3)
1177 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
1178 dev->name, inl(ioaddr + CSR5));
1180 spin_unlock (&tp->lock);
1181 return IRQ_RETVAL(handled);
/*
 * xircom_rx - receive handler, called from the ISR.
 *
 * Walks the Rx ring while descriptors are host-owned (status >= 0),
 * hands good frames to the stack (copying small ones per
 * rx_copybreak), accounts errors, then refills the ring.  Returns a
 * work count that the ISR subtracts from its budget.
 */
1186 xircom_rx(struct net_device *dev)
1188 struct xircom_private *tp = netdev_priv(dev);
1189 int entry = tp->cur_rx % RX_RING_SIZE;
/* Bound the loop so we never outrun the refill (dirty) pointer. */
1190 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
1193 if (xircom_debug > 4)
1194 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1195 tp->rx_ring[entry].status);
1196 /* If we own the next entry, it's a new packet. Send it up. */
/* Sign bit of status is the chip-ownership flag: >= 0 means ours. */
1197 while (tp->rx_ring[entry].status >= 0) {
1198 s32 status = tp->rx_ring[entry].status;
1200 if (xircom_debug > 5)
1201 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1202 tp->rx_ring[entry].status);
1203 if (--rx_work_limit < 0)
/* Error / non-whole-frame handling: magic masks test the chip's
   error-summary and first/last-descriptor bits. */
1205 if ((status & 0x38008300) != 0x0300) {
1206 if ((status & 0x38000300) != 0x0300) {
1207 /* Ignore earlier buffers. */
1208 if ((status & 0xffff) != 0x7fff) {
1209 if (xircom_debug > 1)
1210 printk(KERN_WARNING "%s: Oversized Ethernet frame "
1211 "spanned multiple buffers, status %8.8x!\n",
1213 tp->stats.rx_length_errors++;
1215 } else if (status & Rx0DescError) {
1216 /* There was a fatal error. */
1217 if (xircom_debug > 2)
1218 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
1220 tp->stats.rx_errors++; /* end of a packet.*/
1221 if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
1222 if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
1225 /* Omit the four octet CRC from the length. */
/* Frame length lives in status bits 16..26. */
1226 short pkt_len = ((status >> 16) & 0x7ff) - 4;
1227 struct sk_buff *skb;
1229 #ifndef final_version
1230 if (pkt_len > 1518) {
1231 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
1232 dev->name, pkt_len, pkt_len);
1234 tp->stats.rx_length_errors++;
1237 /* Check if the packet is long enough to accept without copying
1238 to a minimally-sized skbuff. */
/* Copybreak path: small frames are copied into a fresh skb so the
   large ring buffer can be immediately reused. */
1239 if (pkt_len < rx_copybreak
1240 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1241 skb_reserve(skb, 2); /* 16 byte align the IP header */
1242 #if ! defined(__alpha__)
/* NOTE(review): bus_to_virt/virt_to_bus is a legacy DMA API. */
1243 skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
1245 skb_put(skb, pkt_len);
1247 memcpy(skb_put(skb, pkt_len),
1248 bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
1251 } else { /* Pass up the skb already on the Rx ring. */
1252 skb_put(skb = tp->rx_skbuff[entry], pkt_len);
1253 tp->rx_skbuff[entry] = NULL;
1255 skb->protocol = eth_type_trans(skb, dev);
1257 dev->last_rx = jiffies;
1258 tp->stats.rx_packets++;
1259 tp->stats.rx_bytes += pkt_len;
1261 entry = (++tp->cur_rx) % RX_RING_SIZE;
1264 /* Refill the Rx ring buffers. */
/* Give every consumed slot a fresh buffer and hand it back to the chip. */
1265 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
1266 entry = tp->dirty_rx % RX_RING_SIZE;
1267 if (tp->rx_skbuff[entry] == NULL) {
1268 struct sk_buff *skb;
1269 skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
1272 skb->dev = dev; /* Mark as being used by this device. */
1273 tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
/* Setting the owned bit transfers the descriptor to the chip. */
1276 tp->rx_ring[entry].status = Rx0DescOwned;
/*
 * xircom_down - quiesce the hardware: mask interrupts, stop the Tx/Rx
 * engines, harvest the missed-frame counter, and restore the saved
 * media port.  Leaves the chip idle but configured.
 */
1284 xircom_down(struct net_device *dev)
1286 long ioaddr = dev->base_addr;
1287 struct xircom_private *tp = netdev_priv(dev);
1289 /* Disable interrupts by clearing the interrupt mask. */
1290 outl(0, ioaddr + CSR7);
1291 /* Stop the chip's Tx and Rx processes. */
1292 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
/* All-ones means the card has been pulled; skip the counter read. */
1294 if (inl(ioaddr + CSR6) != 0xffffffff)
1295 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1297 dev->if_port = tp->saved_if_port;
/*
 * xircom_close - net_device stop() callback.  Stops the queue, shuts
 * the chip down (when still present), releases the IRQ and frees every
 * buffer on the Rx and Tx rings.
 */
1302 xircom_close(struct net_device *dev)
1304 long ioaddr = dev->base_addr;
1305 struct xircom_private *tp = netdev_priv(dev);
1308 if (xircom_debug > 1)
1309 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1310 dev->name, inl(ioaddr + CSR5));
1312 netif_stop_queue(dev);
/* Only touch hardware if the (removable) card is still there. */
1314 if (netif_device_present(dev))
1317 free_irq(dev->irq, dev);
1319 /* Free all the skbuffs in the Rx queue. */
1320 for (i = 0; i < RX_RING_SIZE; i++) {
1321 struct sk_buff *skb = tp->rx_skbuff[i];
1322 tp->rx_skbuff[i] = NULL;
1323 tp->rx_ring[i].status = 0; /* Not owned by Xircom chip. */
1324 tp->rx_ring[i].length = 0;
/* Poison the DMA address so stray chip accesses are obvious. */
1325 tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
/* Release any skbs still queued for transmit. */
1330 for (i = 0; i < TX_RING_SIZE; i++) {
1331 if (tp->tx_skbuff[i])
1332 dev_kfree_skb(tp->tx_skbuff[i]);
1333 tp->tx_skbuff[i] = NULL;
/*
 * xircom_get_stats - net_device get_stats() callback.  Folds the
 * chip's missed-frame counter (CSR8) into the software stats before
 * returning them; skips the register read if the card is absent.
 */
1341 static struct net_device_stats *xircom_get_stats(struct net_device *dev)
1343 struct xircom_private *tp = netdev_priv(dev);
1344 long ioaddr = dev->base_addr;
1346 if (netif_device_present(dev))
1347 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
/*
 * xircom_get_settings - ethtool get_settings callback.  Reports the
 * MII link parameters from the driver's cached state (tp->advertising,
 * tp->autoneg, tp->speed100, tp->full_duplex) rather than live PHY
 * registers.
 */
1352 static int xircom_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1354 struct xircom_private *tp = netdev_priv(dev);
1356 SUPPORTED_10baseT_Half |
1357 SUPPORTED_10baseT_Full |
1358 SUPPORTED_100baseT_Half |
1359 SUPPORTED_100baseT_Full |
/* Translate the cached MII advertisement bits to ethtool flags. */
1363 ecmd->advertising = ADVERTISED_MII;
1364 if (tp->advertising[0] & ADVERTISE_10HALF)
1365 ecmd->advertising |= ADVERTISED_10baseT_Half;
1366 if (tp->advertising[0] & ADVERTISE_10FULL)
1367 ecmd->advertising |= ADVERTISED_10baseT_Full;
1368 if (tp->advertising[0] & ADVERTISE_100HALF)
1369 ecmd->advertising |= ADVERTISED_100baseT_Half;
1370 if (tp->advertising[0] & ADVERTISE_100FULL)
1371 ecmd->advertising |= ADVERTISED_100baseT_Full;
1373 ecmd->advertising |= ADVERTISED_Autoneg;
1374 ecmd->autoneg = AUTONEG_ENABLE;
1376 ecmd->autoneg = AUTONEG_DISABLE;
1378 ecmd->port = PORT_MII;
1379 ecmd->transceiver = XCVR_INTERNAL;
1380 ecmd->phy_address = tp->phys[0];
1381 ecmd->speed = tp->speed100 ? SPEED_100 : SPEED_10;
1382 ecmd->duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1383 ecmd->maxtxpkt = TX_RING_SIZE / 2;
/*
 * xircom_set_settings - ethtool set_settings callback.  Caches the
 * requested autoneg/speed/duplex in xircom_private and rewrites the
 * MII advertisement bits to match; the PHY itself is reprogrammed
 * elsewhere (check_duplex).
 */
1388 static int xircom_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1390 struct xircom_private *tp = netdev_priv(dev);
1391 u16 autoneg, speed100, full_duplex;
/* Normalize the ethtool request to boolean flags. */
1393 autoneg = (ecmd->autoneg == AUTONEG_ENABLE);
1394 speed100 = (ecmd->speed == SPEED_100);
1395 full_duplex = (ecmd->duplex == DUPLEX_FULL);
1397 tp->autoneg = autoneg;
/* Only rewrite the advertisement when speed or duplex changed. */
1398 if (speed100 != tp->speed100 ||
1399 full_duplex != tp->full_duplex) {
1400 tp->speed100 = speed100;
1401 tp->full_duplex = full_duplex;
1402 /* change advertising bits */
1403 tp->advertising[0] &= ~(ADVERTISE_10HALF |
1407 ADVERTISE_100BASE4);
1410 tp->advertising[0] |= ADVERTISE_100FULL;
1412 tp->advertising[0] |= ADVERTISE_100HALF;
1415 tp->advertising[0] |= ADVERTISE_10FULL;
1417 tp->advertising[0] |= ADVERTISE_10HALF;
/*
 * xircom_get_drvinfo - ethtool get_drvinfo callback: report driver
 * name, version and the PCI bus address of the device.
 * NOTE(review): strcpy into fixed ethtool buffers; the constants here
 * are short, but strlcpy would be the defensive choice.
 */
1424 static void xircom_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1426 struct xircom_private *tp = netdev_priv(dev);
1427 strcpy(info->driver, DRV_NAME);
1428 strcpy(info->version, DRV_VERSION);
1429 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool entry points exported by this driver. */
1432 static const struct ethtool_ops ops = {
1433 .get_settings = xircom_get_settings,
1434 .set_settings = xircom_set_settings,
1435 .get_drvinfo = xircom_get_drvinfo,
1438 /* Provide ioctl() calls to examine the MII xcvr state. */
/*
 * xircom_ioctl - SIOC{G,S}MII* handler.  Reads/writes PHY registers on
 * behalf of userspace (mii-diag et al.) and mirrors writes that target
 * our own PHY's BMCR/advertisement back into the driver's cached link
 * state so it stays consistent.
 */
1439 static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1441 struct xircom_private *tp = netdev_priv(dev);
1442 u16 *data = (u16 *)&rq->ifr_ifru;
1443 int phy = tp->phys[0] & 0x1f;
1444 unsigned long flags;
1447 /* Legacy mii-diag interface */
1448 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1454 case SIOCGMIIREG: /* Read MII PHY register. */
1457 data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
/* NOTE(review): save_flags/cli/restore_flags are the long-removed
   global interrupt-disable API -- a spinlock is the modern form. */
1458 restore_flags(flags);
1460 case SIOCSMIIREG: /* Write MII PHY register. */
/* Register writes can reconfigure the link: require CAP_NET_ADMIN. */
1461 if (!capable(CAP_NET_ADMIN))
/* Writes aimed at our PHY update the cached link policy too. */
1465 if (data[0] == tp->phys[0]) {
1466 u16 value = data[2];
1469 if (value & (BMCR_RESET | BMCR_ANENABLE))
1470 /* Autonegotiation. */
1473 tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
1478 tp->advertising[0] = value;
1483 mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1484 restore_flags(flags);
1493 /* Set or clear the multicast filter for this adaptor.
1494 Note that we only use exclusion around actually queueing the
1495 new frame, not around filling tp->setup_frame. This is non-deterministic
1496 when re-entered but still correct. */
/*
 * set_rx_mode - program the chip's address filter.
 *
 * Promiscuous and all-multi modes are pure CSR6 bit flips.  Otherwise
 * a "setup frame" is built in tp->setup_frame (either a 16-address
 * perfect filter or, above 14 multicast addresses, a 512-bit hash
 * table) and queued on the Tx ring for the chip to load.
 */
1497 static void set_rx_mode(struct net_device *dev)
1499 struct xircom_private *tp = netdev_priv(dev);
1500 struct dev_mc_list *mclist;
1501 long ioaddr = dev->base_addr;
1502 int csr6 = inl(ioaddr + CSR6);
1503 u16 *eaddrs, *setup_frm;
/* Start from a clean filter mode in both cached and live CSR6. */
1507 tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1508 csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1509 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1510 tp->csr6 |= PromiscBit;
1515 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
1516 /* Too many to filter well -- accept all multicasts. */
1517 tp->csr6 |= AllMultiBit;
1518 csr6 |= AllMultiBit;
/* Descriptor flags for the setup frame we are about to queue. */
1522 tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;
1524 /* Note that only the low-address shortword of setup_frame is valid! */
1525 setup_frm = tp->setup_frame;
1526 mclist = dev->mc_list;
1528 /* Fill the first entry with our physical address. */
/* Each 6-byte address occupies three 16-bit slots, each padded to
   32 bits -- hence the "+= 2" stride on a u16 pointer. */
1529 eaddrs = (u16 *)dev->dev_addr;
1530 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1531 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1532 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1534 if (dev->mc_count > 14) { /* Must use a multicast hash table. */
1535 u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
1538 tx_flags |= Tx1HashSetup;
1539 tp->csr6 |= HashFilterBit;
1540 csr6 |= HashFilterBit;
1542 /* Fill the unused 3 entries with the broadcast address.
1543 At least one entry *must* contain the broadcast address!!!*/
1544 for (i = 0; i < 3; i++) {
1545 *setup_frm = 0xffff; setup_frm += 2;
1546 *setup_frm = 0xffff; setup_frm += 2;
1547 *setup_frm = 0xffff; setup_frm += 2;
1550 /* Truly brain-damaged hash filter layout */
1551 /* XXX: not sure if I should take the last or the first 9 bits */
/* Hash each multicast address to one of 512 filter bits via a
   9-bit CRC slice, then set that bit in the table. */
1552 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1554 hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
1556 hash2 = hash + ((hash >> 4) << 4) +
1560 hash2 = 64 + hash + (hash >> 4) * 80;
1562 hptr = &hash_table[hash2 & ~0x1f];
1563 *hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
1566 /* We have <= 14 mcast addresses so we can use Xircom's
1567 wonderful 16-address perfect filter. */
1568 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1569 eaddrs = (u16 *)mclist->dmi_addr;
1570 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1571 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1572 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1574 /* Fill the unused entries with the broadcast address.
1575 At least one entry *must* contain the broadcast address!!!*/
1576 for (; i < 15; i++) {
1577 *setup_frm = 0xffff; setup_frm += 2;
1578 *setup_frm = 0xffff; setup_frm += 2;
1579 *setup_frm = 0xffff; setup_frm += 2;
1583 /* Now add this frame to the Tx list. */
/* If the ring is nearly full, skip queueing the setup frame. */
1584 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1585 /* Same setup recently queued, we need not add it. */
1586 /* XXX: Huh? All it means is that the Tx list is full...*/
1588 unsigned long flags;
/* NOTE(review): save_flags/cli is the pre-2.6 global IRQ-disable
   API; modern code would take tp->lock with irqsave here. */
1592 save_flags(flags); cli();
1593 entry = tp->cur_tx++ % TX_RING_SIZE;
1596 /* Avoid a chip errata by prefixing a dummy entry. */
1597 tp->tx_skbuff[entry] = NULL;
1598 tp->tx_ring[entry].length =
1599 (entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
1600 tp->tx_ring[entry].buffer1 = 0;
1601 /* race with chip, set Tx0DescOwned later */
1603 entry = tp->cur_tx++ % TX_RING_SIZE;
1606 tp->tx_skbuff[entry] = NULL;
1607 /* Put the setup frame on the Tx list. */
1608 if (entry == TX_RING_SIZE - 1)
1609 tx_flags |= Tx1RingWrap; /* Wrap ring. */
1610 tp->tx_ring[entry].length = tx_flags;
1611 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
/* Hand the setup descriptor to the chip... */
1612 tp->tx_ring[entry].status = Tx0DescOwned;
1613 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
1615 netif_stop_queue (dev);
/* ...then release the dummy descriptor that precedes it. */
1618 tp->tx_ring[dummy].status = Tx0DescOwned;
1619 restore_flags(flags);
1620 /* Trigger an immediate transmit demand. */
1621 outl(0, ioaddr + CSR1);
/* Finally, commit the updated filter-mode bits to the chip. */
1625 outl_CSR6(csr6, ioaddr);
/* PCI IDs this driver binds to: vendor 0x115D (Xircom), device 0x0003,
   mapped to chip table index X3201_3. */
1629 static struct pci_device_id xircom_pci_table[] = {
1630 { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
1633 MODULE_DEVICE_TABLE(pci, xircom_pci_table);
/*
 * xircom_suspend - legacy PCI suspend callback: save PCI config state,
 * disable the device and drop it into D3 (the literal 3 predates the
 * pci_choose_state()/PCI_D3hot helpers).
 */
1637 static int xircom_suspend(struct pci_dev *pdev, pm_message_t state)
1639 struct net_device *dev = pci_get_drvdata(pdev);
1640 struct xircom_private *tp = netdev_priv(dev);
1641 printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
1645 pci_save_state(pdev);
1646 pci_disable_device(pdev);
1647 pci_set_power_state(pdev, 3);
/*
 * xircom_resume - legacy PCI resume callback: restore power state and
 * PCI config, wake the chip from ACPI sleep, and redo the transceiver
 * initialization voodoo.
 */
1653 static int xircom_resume(struct pci_dev *pdev)
1655 struct net_device *dev = pci_get_drvdata(pdev);
1656 struct xircom_private *tp = netdev_priv(dev);
1657 printk(KERN_INFO "xircom_resume(%s)\n", dev->name);
/* Back to D0, re-enable and restore the config space saved at suspend. */
1659 pci_set_power_state(pdev,0);
1660 pci_enable_device(pdev);
1661 pci_restore_state(pdev);
1663 /* Bring the chip out of sleep mode.
1664 Caution: Snooze mode does not work with some boards! */
1665 if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
1666 pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);
1668 transceiver_voodoo(dev);
1669 if (xircom_tbl[tp->chip_id].flags & HAS_MII)
/*
 * xircom_remove_one - PCI remove callback: unregister the netdevice,
 * release the I/O regions and clear the drvdata pointer.
 */
1679 static void __devexit xircom_remove_one(struct pci_dev *pdev)
1681 struct net_device *dev = pci_get_drvdata(pdev);
1683 printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
1684 unregister_netdev(dev);
1685 pci_release_regions(pdev);
1687 pci_set_drvdata(pdev, NULL);
/* PCI driver glue: probe/remove plus optional power-management hooks
   (compiled in only under CONFIG_PM). */
1691 static struct pci_driver xircom_driver = {
1693 .id_table = xircom_pci_table,
1694 .probe = xircom_init_one,
1695 .remove = __devexit_p(xircom_remove_one),
1697 .suspend = xircom_suspend,
1698 .resume = xircom_resume
1699 #endif /* CONFIG_PM */
/* Module init: register the PCI driver with the PCI core. */
1703 static int __init xircom_init(void)
1705 /* when a module, this is printed whether or not devices are found in probe */
1709 return pci_register_driver(&xircom_driver);
/* Module exit: unregister the PCI driver (triggers remove for each
   bound device). */
1713 static void __exit xircom_exit(void)
1715 pci_unregister_driver(&xircom_driver);
1718 module_init(xircom_init)
1719 module_exit(xircom_exit)