1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
19 See the file COPYING in this distribution for more information.
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
28 * Test Tx checksumming thoroughly
29 * Implement dev->tx_timeout
32 * Complete reset on PciErr
33 * Consider Rx interrupt mitigation using TimerIntr
34 * Investigate using skb->priority with h/w VLAN priority
35 * Investigate using High Priority Tx Queue with skb->priority
36 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
37 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
38 * Implement Tx software interrupt mitigation via
40 * The real minimum of CP_MIN_MTU is 4 bytes. However,
41 for this to be supported, one must(?) turn on packet padding.
42 * Support external MII transceivers (patch available)
45 * TX checksumming is considered experimental. It is off by
46 default; use ethtool to turn it on.
50 #define DRV_NAME "8139cp"
51 #define DRV_VERSION "1.3"
52 #define DRV_RELDATE "Mar 22, 2004"
55 #include <linux/module.h>
56 #include <linux/moduleparam.h>
57 #include <linux/kernel.h>
58 #include <linux/compiler.h>
59 #include <linux/netdevice.h>
60 #include <linux/etherdevice.h>
61 #include <linux/init.h>
62 #include <linux/pci.h>
63 #include <linux/dma-mapping.h>
64 #include <linux/delay.h>
65 #include <linux/ethtool.h>
66 #include <linux/mii.h>
67 #include <linux/if_vlan.h>
68 #include <linux/crc32.h>
71 #include <linux/tcp.h>
72 #include <linux/udp.h>
73 #include <linux/cache.h>
76 #include <asm/uaccess.h>
78 /* VLAN tagging feature enable/disable */
79 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
80 #define CP_VLAN_TAG_USED 1
81 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
82 do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
84 #define CP_VLAN_TAG_USED 0
85 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
86 do { (tx_desc)->opts2 = 0; } while (0)
89 /* These identify the driver base version and may not be removed. */
90 static char version[] =
91 KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
93 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
94 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
95 MODULE_VERSION(DRV_VERSION);
96 MODULE_LICENSE("GPL");
98 static int debug = -1;
99 module_param(debug, int, 0);
100 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
102 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
103 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
104 static int multicast_filter_limit = 32;
105 module_param(multicast_filter_limit, int, 0);
106 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
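/* Illustrative note (added for clarity, not part of the original source):
 * the 64-entry multicast hash mentioned above is indexed by the top six
 * bits of the CRC-32 of the destination MAC, exactly as __cp_set_rx_mode()
 * does further down.  Sketch of the per-address computation, assuming a
 * u32 mc_filter[2] holding the two MAR0 words:
 *
 *	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */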
108 #define PFX DRV_NAME ": "
112 #define TRUE (!FALSE)
115 #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
118 #define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
119 #define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
120 #define CP_REGS_SIZE (0xff + 1)
121 #define CP_REGS_VER 1 /* version 1 */
122 #define CP_RX_RING_SIZE 64
123 #define CP_TX_RING_SIZE 64
124 #define CP_RING_BYTES \
125 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
126 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
128 #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
129 #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
130 #define TX_BUFFS_AVAIL(CP) \
131 (((CP)->tx_tail <= (CP)->tx_head) ? \
132 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
133 (CP)->tx_tail - (CP)->tx_head - 1)
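/* Illustrative note (added, not in the original): the ring indices wrap
 * with a power-of-two mask, and one descriptor is always left unused so a
 * full ring can be told apart from an empty one.  For example, with
 * CP_TX_RING_SIZE == 64, tx_head == 10 and tx_tail == 5:
 *
 *	TX_BUFFS_AVAIL == 5 + 63 - 10 == 58 free descriptors
 *
 * and when tx_tail > tx_head the second branch gives tx_tail - tx_head - 1.
 */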
135 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
137 #define CP_INTERNAL_PHY 32
139 /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
140 #define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
141 #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
142 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
143 #define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
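/* Worked example (illustrative, added): the log_2(bytes)-4 encoding above
 * means a register value v selects 16 << v bytes, so RX_FIFO_THRESH 5 is
 * 512 bytes, RX_DMA_BURST 4 is 256 bytes and TX_DMA_BURST 6 is 1024 bytes,
 * matching the per-define comments; per the note above, 7 means wait for
 * the end of the packet.  TX_EARLY_THRESH, by contrast, is a plain byte
 * count.
 */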
145 /* Time in jiffies before concluding the transmitter is hung. */
146 #define TX_TIMEOUT (6*HZ)
148 /* hardware minimum and maximum for a single frame's data payload */
149 #define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
150 #define CP_MAX_MTU 4096
153 /* NIC register offsets */
154 MAC0 = 0x00, /* Ethernet hardware address. */
155 MAR0 = 0x08, /* Multicast filter. */
156 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
157 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
158 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
159 Cmd = 0x37, /* Command register */
160 IntrMask = 0x3C, /* Interrupt mask */
161 IntrStatus = 0x3E, /* Interrupt status */
162 TxConfig = 0x40, /* Tx configuration */
163 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
164 RxConfig = 0x44, /* Rx configuration */
165 RxMissed = 0x4C, /* 24 bits valid, write clears */
166 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
167 Config1 = 0x52, /* Config1 */
168 Config3 = 0x59, /* Config3 */
169 Config4 = 0x5A, /* Config4 */
170 MultiIntr = 0x5C, /* Multiple interrupt select */
171 BasicModeCtrl = 0x62, /* MII BMCR */
172 BasicModeStatus = 0x64, /* MII BMSR */
173 NWayAdvert = 0x66, /* MII ADVERTISE */
174 NWayLPAR = 0x68, /* MII LPA */
175 NWayExpansion = 0x6A, /* MII Expansion */
176 Config5 = 0xD8, /* Config5 */
177 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
178 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
179 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
180 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
181 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
182 TxThresh = 0xEC, /* Early Tx threshold */
183 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
184 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
186 /* Tx and Rx status descriptors */
187 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
188 RingEnd = (1 << 30), /* End of descriptor ring */
189 FirstFrag = (1 << 29), /* First segment of a packet */
190 LastFrag = (1 << 28), /* Final segment of a packet */
191 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
192 MSSShift = 16, /* MSS value position */
193 MSSMask = 0xfff, /* MSS value: 11 bits */
194 TxError = (1 << 23), /* Tx error summary */
195 RxError = (1 << 20), /* Rx error summary */
196 IPCS = (1 << 18), /* Calculate IP checksum */
197 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
198 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
199 TxVlanTag = (1 << 17), /* Add VLAN tag */
200 RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
201 IPFail = (1 << 15), /* IP checksum failed */
202 UDPFail = (1 << 14), /* UDP/IP checksum failed */
203 TCPFail = (1 << 13), /* TCP/IP checksum failed */
204 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
205 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
206 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
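	/* Illustrative note (added, not in the original): cp_rx_csum_ok()
	 * below extracts these two protocol-ID bits with (status >> 16) & 0x3
	 * and only trusts the hardware checksum when the matching
	 * IPFail/UDPFail/TCPFail bit above is clear.
	 */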
210 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
211 TxOWC = (1 << 22), /* Tx Out-of-window collision */
212 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
213 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
214 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
215 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
216 RxErrFrame = (1 << 27), /* Rx frame alignment error */
217 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
218 RxErrCRC = (1 << 18), /* Rx CRC error */
219 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
220 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
221 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
223 /* StatsAddr register */
224 DumpStats = (1 << 3), /* Begin stats dump */
226 /* RxConfig register */
227 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
228 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
229 AcceptErr = 0x20, /* Accept packets with CRC errors */
230 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
231 AcceptBroadcast = 0x08, /* Accept broadcast packets */
232 AcceptMulticast = 0x04, /* Accept multicast packets */
233 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
234 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
236 /* IntrMask / IntrStatus registers */
237 PciErr = (1 << 15), /* System error on the PCI bus */
238 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
239 LenChg = (1 << 13), /* Cable length change */
240 SWInt = (1 << 8), /* Software-requested interrupt */
241 TxEmpty = (1 << 7), /* No Tx descriptors available */
242 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
243 LinkChg = (1 << 5), /* Packet underrun, or link change */
244 RxEmpty = (1 << 4), /* No Rx descriptors available */
245 TxErr = (1 << 3), /* Tx error */
246 TxOK = (1 << 2), /* Tx packet sent */
247 RxErr = (1 << 1), /* Rx error */
248 RxOK = (1 << 0), /* Rx packet received */
249 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
250 but hardware likes to raise it */
252 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
253 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
254 RxErr | RxOK | IntrResvd,
256 /* C mode command register */
257 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
258 RxOn = (1 << 3), /* Rx mode enable */
259 TxOn = (1 << 2), /* Tx mode enable */
261 /* C+ mode command register */
262 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
263 RxChkSum = (1 << 5), /* Rx checksum offload enable */
264 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
265 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
266 CpRxOn = (1 << 1), /* Rx mode enable */
267 CpTxOn = (1 << 0), /* Tx mode enable */
269 	/* Cfg9346 EEPROM control register */
270 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
271 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
273 /* TxConfig register */
274 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
275 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
277 /* Early Tx Threshold register */
278 TxThreshMask = 0x3f, /* Mask bits 5-0 */
279 TxThreshMax = 2048, /* Max early Tx threshold */
281 /* Config1 register */
282 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
283 LWACT = (1 << 4), /* LWAKE active mode */
284 PMEnable = (1 << 0), /* Enable various PM features of chip */
286 /* Config3 register */
287 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
288 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
289 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
291 /* Config4 register */
292 LWPTN = (1 << 1), /* LWAKE Pattern */
293 LWPME = (1 << 4), /* LANWAKE vs PMEB */
295 /* Config5 register */
296 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
297 MWF = (1 << 5), /* Accept Multicast wakeup frame */
298 UWF = (1 << 4), /* Accept Unicast wakeup frame */
299 LANWake = (1 << 1), /* Enable LANWake signal */
300 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
302 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
303 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
304 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
307 static const unsigned int cp_rx_config =
308 (RX_FIFO_THRESH << RxCfgFIFOShift) |
309 (RX_DMA_BURST << RxCfgDMAShift);
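/* Illustrative: with the defaults above this constant works out to
 * (5 << 13) | (4 << 8) == 0xa400; the Accept* mode bits are OR'd in at
 * runtime by __cp_set_rx_mode() before the value is written to RxConfig.
 */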
317 struct cp_dma_stats {
331 } __attribute__((packed));
333 struct cp_extra_stats {
334 unsigned long rx_frags;
339 struct net_device *dev;
343 struct pci_dev *pdev;
347 struct net_device_stats net_stats;
348 struct cp_extra_stats cp_stats;
350 unsigned rx_head ____cacheline_aligned;
352 struct cp_desc *rx_ring;
353 struct sk_buff *rx_skb[CP_RX_RING_SIZE];
355 unsigned tx_head ____cacheline_aligned;
357 struct cp_desc *tx_ring;
358 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
361 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
364 struct vlan_group *vlgrp;
368 struct mii_if_info mii_if;
371 #define cpr8(reg) readb(cp->regs + (reg))
372 #define cpr16(reg) readw(cp->regs + (reg))
373 #define cpr32(reg) readl(cp->regs + (reg))
374 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
375 #define cpw16(reg,val) writew((val), cp->regs + (reg))
376 #define cpw32(reg,val) writel((val), cp->regs + (reg))
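/* Usage note (illustrative, added): these accessors assume a variable
 * named cp pointing at the device's cp_private.  The *_f ("flush")
 * variants defined next read the register back immediately after writing,
 * so the posted MMIO write is known to have reached the chip before the
 * macro returns, e.g.:
 *
 *	cpw16_f(IntrMask, 0);	/- interrupts really masked at this point
 */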
377 #define cpw8_f(reg,val) do { \
378 writeb((val), cp->regs + (reg)); \
379 readb(cp->regs + (reg)); \
381 #define cpw16_f(reg,val) do { \
382 writew((val), cp->regs + (reg)); \
383 readw(cp->regs + (reg)); \
385 #define cpw32_f(reg,val) do { \
386 writel((val), cp->regs + (reg)); \
387 readl(cp->regs + (reg)); \
391 static void __cp_set_rx_mode (struct net_device *dev);
392 static void cp_tx (struct cp_private *cp);
393 static void cp_clean_rings (struct cp_private *cp);
394 #ifdef CONFIG_NET_POLL_CONTROLLER
395 static void cp_poll_controller(struct net_device *dev);
397 static int cp_get_eeprom_len(struct net_device *dev);
398 static int cp_get_eeprom(struct net_device *dev,
399 struct ethtool_eeprom *eeprom, u8 *data);
400 static int cp_set_eeprom(struct net_device *dev,
401 struct ethtool_eeprom *eeprom, u8 *data);
403 static struct pci_device_id cp_pci_tbl[] = {
404 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
405 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
408 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
411 const char str[ETH_GSTRING_LEN];
412 } ethtool_stats_keys[] = {
431 static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
433 struct cp_private *cp = netdev_priv(dev);
436 spin_lock_irqsave(&cp->lock, flags);
438 cp->cpcmd |= RxVlanOn;
439 cpw16(CpCmd, cp->cpcmd);
440 spin_unlock_irqrestore(&cp->lock, flags);
443 static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
445 struct cp_private *cp = netdev_priv(dev);
448 spin_lock_irqsave(&cp->lock, flags);
449 cp->cpcmd &= ~RxVlanOn;
450 cpw16(CpCmd, cp->cpcmd);
452 cp->vlgrp->vlan_devices[vid] = NULL;
453 spin_unlock_irqrestore(&cp->lock, flags);
455 #endif /* CP_VLAN_TAG_USED */
457 static inline void cp_set_rxbufsize (struct cp_private *cp)
459 unsigned int mtu = cp->dev->mtu;
461 if (mtu > ETH_DATA_LEN)
462 /* MTU + ethernet header + FCS + optional VLAN tag */
463 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
465 cp->rx_buf_sz = PKT_BUF_SZ;
468 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
469 struct cp_desc *desc)
471 skb->protocol = eth_type_trans (skb, cp->dev);
473 cp->net_stats.rx_packets++;
474 cp->net_stats.rx_bytes += skb->len;
475 cp->dev->last_rx = jiffies;
478 if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
479 vlan_hwaccel_receive_skb(skb, cp->vlgrp,
480 be16_to_cpu(desc->opts2 & 0xffff));
483 netif_receive_skb(skb);
486 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
489 if (netif_msg_rx_err (cp))
491 "%s: rx err, slot %d status 0x%x len %d\n",
492 cp->dev->name, rx_tail, status, len);
493 cp->net_stats.rx_errors++;
494 if (status & RxErrFrame)
495 cp->net_stats.rx_frame_errors++;
496 if (status & RxErrCRC)
497 cp->net_stats.rx_crc_errors++;
498 if ((status & RxErrRunt) || (status & RxErrLong))
499 cp->net_stats.rx_length_errors++;
500 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
501 cp->net_stats.rx_length_errors++;
502 if (status & RxErrFIFO)
503 cp->net_stats.rx_fifo_errors++;
506 static inline unsigned int cp_rx_csum_ok (u32 status)
508 unsigned int protocol = (status >> 16) & 0x3;
510 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
512 else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
514 else if ((protocol == RxProtoIP) && (!(status & IPFail)))
519 static int cp_rx_poll (struct net_device *dev, int *budget)
521 struct cp_private *cp = netdev_priv(dev);
522 unsigned rx_tail = cp->rx_tail;
523 unsigned rx_work = dev->quota;
528 cpw16(IntrStatus, cp_rx_intr_mask);
533 struct sk_buff *skb, *new_skb;
534 struct cp_desc *desc;
537 skb = cp->rx_skb[rx_tail];
540 desc = &cp->rx_ring[rx_tail];
541 status = le32_to_cpu(desc->opts1);
542 if (status & DescOwn)
545 len = (status & 0x1fff) - 4;
546 mapping = le64_to_cpu(desc->addr);
548 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
549 /* we don't support incoming fragmented frames.
550 * instead, we attempt to ensure that the
551 * pre-allocated RX skbs are properly sized such
552 * that RX fragments are never encountered
554 cp_rx_err_acct(cp, rx_tail, status, len);
555 cp->net_stats.rx_dropped++;
556 cp->cp_stats.rx_frags++;
560 if (status & (RxError | RxErrFIFO)) {
561 cp_rx_err_acct(cp, rx_tail, status, len);
565 if (netif_msg_rx_status(cp))
566 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
567 dev->name, rx_tail, status, len);
569 buflen = cp->rx_buf_sz + RX_OFFSET;
570 new_skb = dev_alloc_skb (buflen);
572 cp->net_stats.rx_dropped++;
576 skb_reserve(new_skb, RX_OFFSET);
579 pci_unmap_single(cp->pdev, mapping,
580 buflen, PCI_DMA_FROMDEVICE);
582 /* Handle checksum offloading for incoming packets. */
583 if (cp_rx_csum_ok(status))
584 skb->ip_summed = CHECKSUM_UNNECESSARY;
586 skb->ip_summed = CHECKSUM_NONE;
590 mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
592 cp->rx_skb[rx_tail] = new_skb;
594 cp_rx_skb(cp, skb, desc);
598 cp->rx_ring[rx_tail].opts2 = 0;
599 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
600 if (rx_tail == (CP_RX_RING_SIZE - 1))
601 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
604 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
605 rx_tail = NEXT_RX(rx_tail);
611 cp->rx_tail = rx_tail;
616 /* if we did not reach work limit, then we're done with
617 * this round of polling
622 if (cpr16(IntrStatus) & cp_rx_intr_mask)
625 local_irq_save(flags);
626 cpw16_f(IntrMask, cp_intr_mask);
627 __netif_rx_complete(dev);
628 local_irq_restore(flags);
633 return 1; /* not done */
636 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
638 struct net_device *dev = dev_instance;
639 struct cp_private *cp;
642 if (unlikely(dev == NULL))
644 cp = netdev_priv(dev);
646 status = cpr16(IntrStatus);
647 if (!status || (status == 0xFFFF))
650 if (netif_msg_intr(cp))
651 printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
652 dev->name, status, cpr8(Cmd), cpr16(CpCmd));
654 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
656 spin_lock(&cp->lock);
658 	/* close possible races with dev_close */
659 if (unlikely(!netif_running(dev))) {
661 spin_unlock(&cp->lock);
665 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
666 if (netif_rx_schedule_prep(dev)) {
667 cpw16_f(IntrMask, cp_norx_intr_mask);
668 __netif_rx_schedule(dev);
671 if (status & (TxOK | TxErr | TxEmpty | SWInt))
673 if (status & LinkChg)
674 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
676 spin_unlock(&cp->lock);
678 if (status & PciErr) {
681 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
682 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
683 printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
684 dev->name, status, pci_status);
686 /* TODO: reset hardware */
692 #ifdef CONFIG_NET_POLL_CONTROLLER
694 * Polling receive - used by netconsole and other diagnostic tools
695 * to allow network i/o with interrupts disabled.
697 static void cp_poll_controller(struct net_device *dev)
699 disable_irq(dev->irq);
700 cp_interrupt(dev->irq, dev);
701 enable_irq(dev->irq);
705 static void cp_tx (struct cp_private *cp)
707 unsigned tx_head = cp->tx_head;
708 unsigned tx_tail = cp->tx_tail;
710 while (tx_tail != tx_head) {
711 struct cp_desc *txd = cp->tx_ring + tx_tail;
716 status = le32_to_cpu(txd->opts1);
717 if (status & DescOwn)
720 skb = cp->tx_skb[tx_tail];
723 pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
724 le32_to_cpu(txd->opts1) & 0xffff,
727 if (status & LastFrag) {
728 if (status & (TxError | TxFIFOUnder)) {
729 if (netif_msg_tx_err(cp))
730 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
731 cp->dev->name, status);
732 cp->net_stats.tx_errors++;
734 cp->net_stats.tx_window_errors++;
735 if (status & TxMaxCol)
736 cp->net_stats.tx_aborted_errors++;
737 if (status & TxLinkFail)
738 cp->net_stats.tx_carrier_errors++;
739 if (status & TxFIFOUnder)
740 cp->net_stats.tx_fifo_errors++;
742 cp->net_stats.collisions +=
743 ((status >> TxColCntShift) & TxColCntMask);
744 cp->net_stats.tx_packets++;
745 cp->net_stats.tx_bytes += skb->len;
746 if (netif_msg_tx_done(cp))
747 printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
749 dev_kfree_skb_irq(skb);
752 cp->tx_skb[tx_tail] = NULL;
754 tx_tail = NEXT_TX(tx_tail);
757 cp->tx_tail = tx_tail;
759 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
760 netif_wake_queue(cp->dev);
763 static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
765 struct cp_private *cp = netdev_priv(dev);
773 spin_lock_irq(&cp->lock);
775 /* This is a hard error, log it. */
776 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
777 netif_stop_queue(dev);
778 spin_unlock_irq(&cp->lock);
779 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
785 if (cp->vlgrp && vlan_tx_tag_present(skb))
786 vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
790 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
791 if (dev->features & NETIF_F_TSO)
792 mss = skb_shinfo(skb)->gso_size;
794 if (skb_shinfo(skb)->nr_frags == 0) {
795 struct cp_desc *txd = &cp->tx_ring[entry];
800 mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
801 CP_VLAN_TX_TAG(txd, vlan_tag);
802 txd->addr = cpu_to_le64(mapping);
805 flags = eor | len | DescOwn | FirstFrag | LastFrag;
808 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
809 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
810 const struct iphdr *ip = skb->nh.iph;
811 if (ip->protocol == IPPROTO_TCP)
812 flags |= IPCS | TCPCS;
813 else if (ip->protocol == IPPROTO_UDP)
814 flags |= IPCS | UDPCS;
816 WARN_ON(1); /* we need a WARN() */
819 txd->opts1 = cpu_to_le32(flags);
822 cp->tx_skb[entry] = skb;
823 entry = NEXT_TX(entry);
826 u32 first_len, first_eor;
827 dma_addr_t first_mapping;
828 int frag, first_entry = entry;
829 const struct iphdr *ip = skb->nh.iph;
831 /* We must give this initial chunk to the device last.
832 * Otherwise we could race with the device.
835 first_len = skb_headlen(skb);
836 first_mapping = pci_map_single(cp->pdev, skb->data,
837 first_len, PCI_DMA_TODEVICE);
838 cp->tx_skb[entry] = skb;
839 entry = NEXT_TX(entry);
841 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
842 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
847 len = this_frag->size;
848 mapping = pci_map_single(cp->pdev,
849 ((void *) page_address(this_frag->page) +
850 this_frag->page_offset),
851 len, PCI_DMA_TODEVICE);
852 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
854 ctrl = eor | len | DescOwn;
858 ((mss & MSSMask) << MSSShift);
859 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
860 if (ip->protocol == IPPROTO_TCP)
861 ctrl |= IPCS | TCPCS;
862 else if (ip->protocol == IPPROTO_UDP)
863 ctrl |= IPCS | UDPCS;
868 if (frag == skb_shinfo(skb)->nr_frags - 1)
871 txd = &cp->tx_ring[entry];
872 CP_VLAN_TX_TAG(txd, vlan_tag);
873 txd->addr = cpu_to_le64(mapping);
876 txd->opts1 = cpu_to_le32(ctrl);
879 cp->tx_skb[entry] = skb;
880 entry = NEXT_TX(entry);
883 txd = &cp->tx_ring[first_entry];
884 CP_VLAN_TX_TAG(txd, vlan_tag);
885 txd->addr = cpu_to_le64(first_mapping);
888 if (skb->ip_summed == CHECKSUM_PARTIAL) {
889 if (ip->protocol == IPPROTO_TCP)
890 txd->opts1 = cpu_to_le32(first_eor | first_len |
891 FirstFrag | DescOwn |
893 else if (ip->protocol == IPPROTO_UDP)
894 txd->opts1 = cpu_to_le32(first_eor | first_len |
895 FirstFrag | DescOwn |
900 txd->opts1 = cpu_to_le32(first_eor | first_len |
901 FirstFrag | DescOwn);
905 if (netif_msg_tx_queued(cp))
906 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
907 dev->name, entry, skb->len);
908 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
909 netif_stop_queue(dev);
911 spin_unlock_irq(&cp->lock);
913 cpw8(TxPoll, NormalTxPoll);
914 dev->trans_start = jiffies;
919 /* Set or clear the multicast filter for this adaptor.
920 This routine is not state sensitive and need not be SMP locked. */
922 static void __cp_set_rx_mode (struct net_device *dev)
924 struct cp_private *cp = netdev_priv(dev);
925 u32 mc_filter[2]; /* Multicast hash filter */
929 /* Note: do not reorder, GCC is clever about common statements. */
930 if (dev->flags & IFF_PROMISC) {
931 /* Unconditionally log net taps. */
933 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
935 mc_filter[1] = mc_filter[0] = 0xffffffff;
936 } else if ((dev->mc_count > multicast_filter_limit)
937 || (dev->flags & IFF_ALLMULTI)) {
938 /* Too many to filter perfectly -- accept all multicasts. */
939 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
940 mc_filter[1] = mc_filter[0] = 0xffffffff;
942 struct dev_mc_list *mclist;
943 rx_mode = AcceptBroadcast | AcceptMyPhys;
944 mc_filter[1] = mc_filter[0] = 0;
945 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
946 i++, mclist = mclist->next) {
947 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
949 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
950 rx_mode |= AcceptMulticast;
954 /* We can safely update without stopping the chip. */
955 tmp = cp_rx_config | rx_mode;
956 if (cp->rx_config != tmp) {
957 cpw32_f (RxConfig, tmp);
960 cpw32_f (MAR0 + 0, mc_filter[0]);
961 cpw32_f (MAR0 + 4, mc_filter[1]);
964 static void cp_set_rx_mode (struct net_device *dev)
967 struct cp_private *cp = netdev_priv(dev);
969 spin_lock_irqsave (&cp->lock, flags);
970 __cp_set_rx_mode(dev);
971 spin_unlock_irqrestore (&cp->lock, flags);
974 static void __cp_get_stats(struct cp_private *cp)
976 /* only lower 24 bits valid; write any value to clear */
977 cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
981 static struct net_device_stats *cp_get_stats(struct net_device *dev)
983 struct cp_private *cp = netdev_priv(dev);
986 	/* The chip only needs to report frames it silently dropped. */
987 spin_lock_irqsave(&cp->lock, flags);
988 if (netif_running(dev) && netif_device_present(dev))
990 spin_unlock_irqrestore(&cp->lock, flags);
992 return &cp->net_stats;
995 static void cp_stop_hw (struct cp_private *cp)
997 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
998 cpw16_f(IntrMask, 0);
1001 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
1004 cp->tx_head = cp->tx_tail = 0;
1007 static void cp_reset_hw (struct cp_private *cp)
1009 unsigned work = 1000;
1011 cpw8(Cmd, CmdReset);
1014 if (!(cpr8(Cmd) & CmdReset))
1017 schedule_timeout_uninterruptible(10);
1020 printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
1023 static inline void cp_start_hw (struct cp_private *cp)
1025 cpw16(CpCmd, cp->cpcmd);
1026 cpw8(Cmd, RxOn | TxOn);
1029 static void cp_init_hw (struct cp_private *cp)
1031 struct net_device *dev = cp->dev;
1032 dma_addr_t ring_dma;
1036 cpw8_f (Cfg9346, Cfg9346_Unlock);
1038 /* Restore our idea of the MAC address. */
1039 cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
1040 cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
1043 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1045 __cp_set_rx_mode(dev);
1046 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1048 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1049 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1050 cpw8(Config3, PARMEnable);
1051 cp->wol_enabled = 0;
1053 cpw8(Config5, cpr8(Config5) & PMEStatus);
1055 cpw32_f(HiTxRingAddr, 0);
1056 cpw32_f(HiTxRingAddr + 4, 0);
1058 ring_dma = cp->ring_dma;
1059 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1060 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1062 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1063 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1064 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1066 cpw16(MultiIntr, 0);
1068 cpw16_f(IntrMask, cp_intr_mask);
1070 cpw8_f(Cfg9346, Cfg9346_Lock);
1073 static int cp_refill_rx (struct cp_private *cp)
1077 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1078 struct sk_buff *skb;
1081 skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
1086 skb_reserve(skb, RX_OFFSET);
1088 mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
1089 PCI_DMA_FROMDEVICE);
1090 cp->rx_skb[i] = skb;
1092 cp->rx_ring[i].opts2 = 0;
1093 cp->rx_ring[i].addr = cpu_to_le64(mapping);
1094 if (i == (CP_RX_RING_SIZE - 1))
1095 cp->rx_ring[i].opts1 =
1096 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1098 cp->rx_ring[i].opts1 =
1099 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1109 static void cp_init_rings_index (struct cp_private *cp)
1112 cp->tx_head = cp->tx_tail = 0;
1115 static int cp_init_rings (struct cp_private *cp)
1117 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1118 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1120 cp_init_rings_index(cp);
1122 return cp_refill_rx (cp);
1125 static int cp_alloc_rings (struct cp_private *cp)
1129 mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
1134 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1136 return cp_init_rings(cp);
1139 static void cp_clean_rings (struct cp_private *cp)
1141 struct cp_desc *desc;
1144 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1145 if (cp->rx_skb[i]) {
1146 desc = cp->rx_ring + i;
1147 pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
1148 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1149 dev_kfree_skb(cp->rx_skb[i]);
1153 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1154 if (cp->tx_skb[i]) {
1155 struct sk_buff *skb = cp->tx_skb[i];
1157 desc = cp->tx_ring + i;
1158 pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
1159 le32_to_cpu(desc->opts1) & 0xffff,
1161 if (le32_to_cpu(desc->opts1) & LastFrag)
1163 cp->net_stats.tx_dropped++;
1167 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1168 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1170 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1171 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1174 static void cp_free_rings (struct cp_private *cp)
1177 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1182 static int cp_open (struct net_device *dev)
1184 struct cp_private *cp = netdev_priv(dev);
1187 if (netif_msg_ifup(cp))
1188 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1190 rc = cp_alloc_rings(cp);
1196 rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1200 netif_carrier_off(dev);
1201 mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
1202 netif_start_queue(dev);
1212 static int cp_close (struct net_device *dev)
1214 struct cp_private *cp = netdev_priv(dev);
1215 unsigned long flags;
1217 if (netif_msg_ifdown(cp))
1218 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1220 spin_lock_irqsave(&cp->lock, flags);
1222 netif_stop_queue(dev);
1223 netif_carrier_off(dev);
1227 spin_unlock_irqrestore(&cp->lock, flags);
1229 synchronize_irq(dev->irq);
1230 free_irq(dev->irq, dev);
1237 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1239 struct cp_private *cp = netdev_priv(dev);
1241 unsigned long flags;
1243 /* check for invalid MTU, according to hardware limits */
1244 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1247 /* if network interface not up, no need for complexity */
1248 if (!netif_running(dev)) {
1250 cp_set_rxbufsize(cp); /* set new rx buf size */
1254 spin_lock_irqsave(&cp->lock, flags);
1256 cp_stop_hw(cp); /* stop h/w and free rings */
1260 cp_set_rxbufsize(cp); /* set new rx buf size */
1262 rc = cp_init_rings(cp); /* realloc and restart h/w */
1265 spin_unlock_irqrestore(&cp->lock, flags);
1271 static const char mii_2_8139_map[8] = {
1282 static int mdio_read(struct net_device *dev, int phy_id, int location)
1284 struct cp_private *cp = netdev_priv(dev);
1286 return location < 8 && mii_2_8139_map[location] ?
1287 readw(cp->regs + mii_2_8139_map[location]) : 0;
1291 static void mdio_write(struct net_device *dev, int phy_id, int location,
1294 struct cp_private *cp = netdev_priv(dev);
1296 if (location == 0) {
1297 cpw8(Cfg9346, Cfg9346_Unlock);
1298 cpw16(BasicModeCtrl, value);
1299 cpw8(Cfg9346, Cfg9346_Lock);
1300 } else if (location < 8 && mii_2_8139_map[location])
1301 cpw16(mii_2_8139_map[location], value);
1304 /* Set the ethtool Wake-on-LAN settings */
1305 static int netdev_set_wol (struct cp_private *cp,
1306 const struct ethtool_wolinfo *wol)
1310 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1311 /* If WOL is being disabled, no need for complexity */
1313 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1314 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1317 cpw8 (Cfg9346, Cfg9346_Unlock);
1318 cpw8 (Config3, options);
1319 cpw8 (Cfg9346, Cfg9346_Lock);
1321 options = 0; /* Paranoia setting */
1322 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1323 /* If WOL is being disabled, no need for complexity */
1325 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1326 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1327 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1330 cpw8 (Config5, options);
1332 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1337 /* Get the ethtool Wake-on-LAN settings */
1338 static void netdev_get_wol (struct cp_private *cp,
1339 struct ethtool_wolinfo *wol)
1343 wol->wolopts = 0; /* Start from scratch */
1344 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1345 WAKE_MCAST | WAKE_UCAST;
1346 /* We don't need to go on if WOL is disabled */
1347 if (!cp->wol_enabled) return;
1349 options = cpr8 (Config3);
1350 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1351 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1353 options = 0; /* Paranoia setting */
1354 options = cpr8 (Config5);
1355 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1356 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1357 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1360 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1362 struct cp_private *cp = netdev_priv(dev);
1364 strcpy (info->driver, DRV_NAME);
1365 strcpy (info->version, DRV_VERSION);
1366 strcpy (info->bus_info, pci_name(cp->pdev));
1369 static int cp_get_regs_len(struct net_device *dev)
1371 return CP_REGS_SIZE;
1374 static int cp_get_stats_count (struct net_device *dev)
1376 return CP_NUM_STATS;
1379 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1381 struct cp_private *cp = netdev_priv(dev);
1383 unsigned long flags;
1385 spin_lock_irqsave(&cp->lock, flags);
1386 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1387 spin_unlock_irqrestore(&cp->lock, flags);
1392 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1394 struct cp_private *cp = netdev_priv(dev);
1396 unsigned long flags;
1398 spin_lock_irqsave(&cp->lock, flags);
1399 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1400 spin_unlock_irqrestore(&cp->lock, flags);
1405 static int cp_nway_reset(struct net_device *dev)
1407 struct cp_private *cp = netdev_priv(dev);
1408 return mii_nway_restart(&cp->mii_if);
1411 static u32 cp_get_msglevel(struct net_device *dev)
1413 struct cp_private *cp = netdev_priv(dev);
1414 return cp->msg_enable;
1417 static void cp_set_msglevel(struct net_device *dev, u32 value)
1419 struct cp_private *cp = netdev_priv(dev);
1420 cp->msg_enable = value;
1423 static u32 cp_get_rx_csum(struct net_device *dev)
1425 struct cp_private *cp = netdev_priv(dev);
1426 return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
1429 static int cp_set_rx_csum(struct net_device *dev, u32 data)
1431 struct cp_private *cp = netdev_priv(dev);
1432 u16 cmd = cp->cpcmd, newcmd;
1439 newcmd &= ~RxChkSum;
1441 if (newcmd != cmd) {
1442 unsigned long flags;
1444 spin_lock_irqsave(&cp->lock, flags);
1446 cpw16_f(CpCmd, newcmd);
1447 spin_unlock_irqrestore(&cp->lock, flags);
1453 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1456 struct cp_private *cp = netdev_priv(dev);
1457 unsigned long flags;
1459 if (regs->len < CP_REGS_SIZE)
1460 return /* -EINVAL */;
1462 regs->version = CP_REGS_VER;
1464 spin_lock_irqsave(&cp->lock, flags);
1465 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1466 spin_unlock_irqrestore(&cp->lock, flags);
1469 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1471 struct cp_private *cp = netdev_priv(dev);
1472 unsigned long flags;
1474 spin_lock_irqsave (&cp->lock, flags);
1475 netdev_get_wol (cp, wol);
1476 spin_unlock_irqrestore (&cp->lock, flags);
1479 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1481 struct cp_private *cp = netdev_priv(dev);
1482 unsigned long flags;
1485 spin_lock_irqsave (&cp->lock, flags);
1486 rc = netdev_set_wol (cp, wol);
1487 spin_unlock_irqrestore (&cp->lock, flags);
1492 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1494 switch (stringset) {
1496 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1504 static void cp_get_ethtool_stats (struct net_device *dev,
1505 struct ethtool_stats *estats, u64 *tmp_stats)
1507 struct cp_private *cp = netdev_priv(dev);
1508 struct cp_dma_stats *nic_stats;
1512 nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
1516 /* begin NIC statistics dump */
1517 cpw32(StatsAddr + 4, (u64)dma >> 32);
1518 cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
1521 for (i = 0; i < 1000; i++) {
1522 if ((cpr32(StatsAddr) & DumpStats) == 0)
1526 cpw32(StatsAddr, 0);
1527 cpw32(StatsAddr + 4, 0);
1531 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1532 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1533 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1534 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1535 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1536 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1537 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1538 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1539 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1540 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1541 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1542 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1543 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1544 tmp_stats[i++] = cp->cp_stats.rx_frags;
1545 BUG_ON(i != CP_NUM_STATS);
1547 pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
1550 static const struct ethtool_ops cp_ethtool_ops = {
1551 .get_drvinfo = cp_get_drvinfo,
1552 .get_regs_len = cp_get_regs_len,
1553 .get_stats_count = cp_get_stats_count,
1554 .get_settings = cp_get_settings,
1555 .set_settings = cp_set_settings,
1556 .nway_reset = cp_nway_reset,
1557 .get_link = ethtool_op_get_link,
1558 .get_msglevel = cp_get_msglevel,
1559 .set_msglevel = cp_set_msglevel,
1560 .get_rx_csum = cp_get_rx_csum,
1561 .set_rx_csum = cp_set_rx_csum,
1562 .get_tx_csum = ethtool_op_get_tx_csum,
1563 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1564 .get_sg = ethtool_op_get_sg,
1565 .set_sg = ethtool_op_set_sg,
1566 .get_tso = ethtool_op_get_tso,
1567 .set_tso = ethtool_op_set_tso,
1568 .get_regs = cp_get_regs,
1569 .get_wol = cp_get_wol,
1570 .set_wol = cp_set_wol,
1571 .get_strings = cp_get_strings,
1572 .get_ethtool_stats = cp_get_ethtool_stats,
1573 .get_perm_addr = ethtool_op_get_perm_addr,
1574 .get_eeprom_len = cp_get_eeprom_len,
1575 .get_eeprom = cp_get_eeprom,
1576 .set_eeprom = cp_set_eeprom,
1579 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1581 struct cp_private *cp = netdev_priv(dev);
1583 unsigned long flags;
1585 if (!netif_running(dev))
1588 spin_lock_irqsave(&cp->lock, flags);
1589 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1590 spin_unlock_irqrestore(&cp->lock, flags);
1594 /* Serial EEPROM section. */
1596 /* EEPROM_Ctrl bits. */
1597 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1598 #define EE_CS 0x08 /* EEPROM chip select. */
1599 #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1600 #define EE_WRITE_0 0x00
1601 #define EE_WRITE_1 0x02
1602 #define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1603 #define EE_ENB (0x80 | EE_CS)
1605 /* Delay between EEPROM clock transitions.
1606    No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1609 #define eeprom_delay() readl(ee_addr)
1611 /* The EEPROM commands include the always-set leading bit. */
1612 #define EE_EXTEND_CMD (4)
1613 #define EE_WRITE_CMD (5)
1614 #define EE_READ_CMD (6)
1615 #define EE_ERASE_CMD (7)
1617 #define EE_EWDS_ADDR (0)
1618 #define EE_WRAL_ADDR (1)
1619 #define EE_ERAL_ADDR (2)
1620 #define EE_EWEN_ADDR (3)
1622 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
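/* Illustrative note (added, not part of the original driver): the helpers
 * below bit-bang the 93C46/93C66 serial EEPROM through the Cfg9346
 * register.  Each command is a 3-bit value (the always-set start bit plus
 * a 2-bit opcode) followed by the word address, clocked out MSB first,
 * after which 16 data bits are clocked in for a read.  For example,
 * reading word 0 of a 93C46 (6-bit addressing):
 *
 *	read_cmd = 0 | (EE_READ_CMD << 6) == 0x180
 *
 * sent over 3 + 6 clocks, which is exactly what read_eeprom() constructs.
 */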
1624 static void eeprom_cmd_start(void __iomem *ee_addr)
1626 writeb (EE_ENB & ~EE_CS, ee_addr);
1627 writeb (EE_ENB, ee_addr);
1631 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1635 /* Shift the command bits out. */
1636 for (i = cmd_len - 1; i >= 0; i--) {
1637 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1638 writeb (EE_ENB | dataval, ee_addr);
1640 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1643 writeb (EE_ENB, ee_addr);
1647 static void eeprom_cmd_end(void __iomem *ee_addr)
1649 writeb (~EE_CS, ee_addr);
1653 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1656 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1658 eeprom_cmd_start(ee_addr);
1659 eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1660 eeprom_cmd_end(ee_addr);
1663 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1667 void __iomem *ee_addr = ioaddr + Cfg9346;
1668 int read_cmd = location | (EE_READ_CMD << addr_len);
1670 eeprom_cmd_start(ee_addr);
1671 eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1673 for (i = 16; i > 0; i--) {
1674 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1677 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1679 writeb (EE_ENB, ee_addr);
1683 eeprom_cmd_end(ee_addr);
1688 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1692 void __iomem *ee_addr = ioaddr + Cfg9346;
1693 int write_cmd = location | (EE_WRITE_CMD << addr_len);
1695 eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1697 eeprom_cmd_start(ee_addr);
1698 eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1699 eeprom_cmd(ee_addr, val, 16);
1700 eeprom_cmd_end(ee_addr);
1702 eeprom_cmd_start(ee_addr);
1703 for (i = 0; i < 20000; i++)
1704 if (readb(ee_addr) & EE_DATA_READ)
1706 eeprom_cmd_end(ee_addr);
1708 eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1711 static int cp_get_eeprom_len(struct net_device *dev)
1713 struct cp_private *cp = netdev_priv(dev);
1716 spin_lock_irq(&cp->lock);
1717 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1718 spin_unlock_irq(&cp->lock);
1723 static int cp_get_eeprom(struct net_device *dev,
1724 struct ethtool_eeprom *eeprom, u8 *data)
1726 struct cp_private *cp = netdev_priv(dev);
1727 unsigned int addr_len;
1729 u32 offset = eeprom->offset >> 1;
1730 u32 len = eeprom->len;
1733 eeprom->magic = CP_EEPROM_MAGIC;
1735 spin_lock_irq(&cp->lock);
1737 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1739 if (eeprom->offset & 1) {
1740 val = read_eeprom(cp->regs, offset, addr_len);
1741 data[i++] = (u8)(val >> 8);
1745 while (i < len - 1) {
1746 val = read_eeprom(cp->regs, offset, addr_len);
1747 data[i++] = (u8)val;
1748 data[i++] = (u8)(val >> 8);
1753 val = read_eeprom(cp->regs, offset, addr_len);
1757 spin_unlock_irq(&cp->lock);
1761 static int cp_set_eeprom(struct net_device *dev,
1762 struct ethtool_eeprom *eeprom, u8 *data)
1764 struct cp_private *cp = netdev_priv(dev);
1765 unsigned int addr_len;
1767 u32 offset = eeprom->offset >> 1;
1768 u32 len = eeprom->len;
1771 if (eeprom->magic != CP_EEPROM_MAGIC)
1774 spin_lock_irq(&cp->lock);
1776 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1778 if (eeprom->offset & 1) {
1779 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1780 val |= (u16)data[i++] << 8;
1781 write_eeprom(cp->regs, offset, val, addr_len);
1785 while (i < len - 1) {
1786 val = (u16)data[i++];
1787 val |= (u16)data[i++] << 8;
1788 write_eeprom(cp->regs, offset, val, addr_len);
1793 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1794 val |= (u16)data[i];
1795 write_eeprom(cp->regs, offset, val, addr_len);
1798 spin_unlock_irq(&cp->lock);
1802 /* Put the board into D3cold state and wait for WakeUp signal */
1803 static void cp_set_d3_state (struct cp_private *cp)
1805 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1806 pci_set_power_state (cp->pdev, PCI_D3hot);
1809 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1811 struct net_device *dev;
1812 struct cp_private *cp;
1815 resource_size_t pciaddr;
1816 unsigned int addr_len, i, pci_using_dac;
1820 static int version_printed;
1821 if (version_printed++ == 0)
1822 printk("%s", version);
1825 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
1827 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1828 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
1830 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
1831 pdev->vendor, pdev->device, pci_rev);
1832 dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
1836 dev = alloc_etherdev(sizeof(struct cp_private));
1839 SET_MODULE_OWNER(dev);
1840 SET_NETDEV_DEV(dev, &pdev->dev);
1842 cp = netdev_priv(dev);
1845 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1846 spin_lock_init (&cp->lock);
1847 cp->mii_if.dev = dev;
1848 cp->mii_if.mdio_read = mdio_read;
1849 cp->mii_if.mdio_write = mdio_write;
1850 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1851 cp->mii_if.phy_id_mask = 0x1f;
1852 cp->mii_if.reg_num_mask = 0x1f;
1853 cp_set_rxbufsize(cp);
1855 rc = pci_enable_device(pdev);
1859 rc = pci_set_mwi(pdev);
1861 goto err_out_disable;
1863 rc = pci_request_regions(pdev, DRV_NAME);
1867 pciaddr = pci_resource_start(pdev, 1);
1870 dev_err(&pdev->dev, "no MMIO resource\n");
1873 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1875 dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1876 (unsigned long long)pci_resource_len(pdev, 1));
1880 /* Configure DMA attributes. */
1881 if ((sizeof(dma_addr_t) > 4) &&
1882 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
1883 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1888 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1891 "No usable DMA configuration, aborting.\n");
1894 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1897 "No usable consistent DMA configuration, "
1903 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1904 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1906 regs = ioremap(pciaddr, CP_REGS_SIZE);
1909 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1910 (unsigned long long)pci_resource_len(pdev, 1),
1911 (unsigned long long)pciaddr);
1914 dev->base_addr = (unsigned long) regs;
1919 /* read MAC address from EEPROM */
1920 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1921 for (i = 0; i < 3; i++)
1922 ((u16 *) (dev->dev_addr))[i] =
1923 le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
1924 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1926 dev->open = cp_open;
1927 dev->stop = cp_close;
1928 dev->set_multicast_list = cp_set_rx_mode;
1929 dev->hard_start_xmit = cp_start_xmit;
1930 dev->get_stats = cp_get_stats;
1931 dev->do_ioctl = cp_ioctl;
1932 dev->poll = cp_rx_poll;
1933 #ifdef CONFIG_NET_POLL_CONTROLLER
1934 dev->poll_controller = cp_poll_controller;
1936 dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */
1938 dev->change_mtu = cp_change_mtu;
1940 dev->ethtool_ops = &cp_ethtool_ops;
1942 dev->tx_timeout = cp_tx_timeout;
1943 dev->watchdog_timeo = TX_TIMEOUT;
1946 #if CP_VLAN_TAG_USED
1947 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1948 dev->vlan_rx_register = cp_vlan_rx_register;
1949 dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
1953 dev->features |= NETIF_F_HIGHDMA;
1955 #if 0 /* disabled by default until verified */
1956 dev->features |= NETIF_F_TSO;
1959 dev->irq = pdev->irq;
1961 rc = register_netdev(dev);
1965 printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
1966 "%02x:%02x:%02x:%02x:%02x:%02x, "
1970 dev->dev_addr[0], dev->dev_addr[1],
1971 dev->dev_addr[2], dev->dev_addr[3],
1972 dev->dev_addr[4], dev->dev_addr[5],
1975 pci_set_drvdata(pdev, dev);
1977 /* enable busmastering and memory-write-invalidate */
1978 pci_set_master(pdev);
1980 if (cp->wol_enabled)
1981 cp_set_d3_state (cp);
1988 pci_release_regions(pdev);
1990 pci_clear_mwi(pdev);
1992 pci_disable_device(pdev);
1998 static void cp_remove_one (struct pci_dev *pdev)
2000 struct net_device *dev = pci_get_drvdata(pdev);
2001 struct cp_private *cp = netdev_priv(dev);
2003 unregister_netdev(dev);
2005 if (cp->wol_enabled)
2006 pci_set_power_state (pdev, PCI_D0);
2007 pci_release_regions(pdev);
2008 pci_clear_mwi(pdev);
2009 pci_disable_device(pdev);
2010 pci_set_drvdata(pdev, NULL);
2015 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2017 struct net_device *dev = pci_get_drvdata(pdev);
2018 struct cp_private *cp = netdev_priv(dev);
2019 unsigned long flags;
2021 if (!netif_running(dev))
2024 netif_device_detach (dev);
2025 netif_stop_queue (dev);
2027 spin_lock_irqsave (&cp->lock, flags);
2029 /* Disable Rx and Tx */
2030 cpw16 (IntrMask, 0);
2031 	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2033 spin_unlock_irqrestore (&cp->lock, flags);
2035 pci_save_state(pdev);
2036 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2037 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2042 static int cp_resume (struct pci_dev *pdev)
2044 struct net_device *dev = pci_get_drvdata (pdev);
2045 struct cp_private *cp = netdev_priv(dev);
2046 unsigned long flags;
2048 if (!netif_running(dev))
2051 netif_device_attach (dev);
2053 pci_set_power_state(pdev, PCI_D0);
2054 pci_restore_state(pdev);
2055 pci_enable_wake(pdev, PCI_D0, 0);
2057 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2058 cp_init_rings_index (cp);
2060 netif_start_queue (dev);
2062 spin_lock_irqsave (&cp->lock, flags);
2064 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
2066 spin_unlock_irqrestore (&cp->lock, flags);
2070 #endif /* CONFIG_PM */
2072 static struct pci_driver cp_driver = {
2074 .id_table = cp_pci_tbl,
2075 .probe = cp_init_one,
2076 .remove = cp_remove_one,
2078 .resume = cp_resume,
2079 .suspend = cp_suspend,
2083 static int __init cp_init (void)
2086 printk("%s", version);
2088 return pci_register_driver(&cp_driver);
2091 static void __exit cp_exit (void)
2093 pci_unregister_driver (&cp_driver);
2096 module_init(cp_init);
2097 module_exit(cp_exit);