2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c and probably even epic100.c.
10 This software may be used and distributed according to the terms of
11 the GNU General Public License (GPL), incorporated herein by reference.
12 Drivers based on or derived from this code fall under the GPL and must
13 retain the authorship, copyright and license notice. This file is not
14 a complete program and may only be used when the entire operating
15 system is licensed under the GPL.
17 See the file COPYING in this distribution for more information.
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/pci.h>
27 #include <linux/mii.h>
28 #include <linux/delay.h>
29 #include <linux/crc32.h>
30 #include <linux/dma-mapping.h>
33 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
35 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
37 #define net_link(p, arg...) if (netif_msg_link(p)) \
39 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
41 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 #ifdef CONFIG_SIS190_NAPI
45 #define NAPI_SUFFIX "-NAPI"
47 #define NAPI_SUFFIX ""
50 #define DRV_VERSION "1.2" NAPI_SUFFIX
51 #define DRV_NAME "sis190"
52 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53 #define PFX DRV_NAME ": "
55 #ifdef CONFIG_SIS190_NAPI
56 #define sis190_rx_skb netif_receive_skb
57 #define sis190_rx_quota(count, quota) min(count, quota)
59 #define sis190_rx_skb netif_rx
60 #define sis190_rx_quota(count, quota) count
63 #define MAC_ADDR_LEN 6
65 #define NUM_TX_DESC 64
66 #define NUM_RX_DESC 64
67 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
68 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
69 #define RX_BUF_SIZE 1536
71 #define SIS190_REGS_SIZE 0x80
72 #define SIS190_TX_TIMEOUT (6*HZ)
73 #define SIS190_PHY_TIMEOUT (10*HZ)
74 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
75 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
78 /* Enhanced PHY access register bit definitions */
79 #define EhnMIIread 0x0000
80 #define EhnMIIwrite 0x0020
81 #define EhnMIIdataShift 16
82 #define EhnMIIpmdShift 6 /* 7016 only */
83 #define EhnMIIregShift 11
84 #define EhnMIIreq 0x0010
85 #define EhnMIInotDone 0x0010
87 /* Write/read MMIO register */
88 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
89 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
90 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
91 #define SIS_R8(reg) readb (ioaddr + (reg))
92 #define SIS_R16(reg) readw (ioaddr + (reg))
93 #define SIS_R32(reg) readl (ioaddr + (reg))
95 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
97 enum sis190_registers {
99 TxDescStartAddr = 0x04,
100 TxNextDescAddr = 0x0c, // unused
102 RxDescStartAddr = 0x14,
103 RxNextDescAddr = 0x1c, // unused
107 IntrTimer = 0x2c, // unused
108 PMControl = 0x30, // unused
111 StationControl = 0x40,
117 // Undocumented = 0x6c,
119 // Undocumented = 0x74,
120 RxMPSControl = 0x78, // unused
123 enum sis190_register_content {
125 SoftInt = 0x40000000, // unused
126 Timeup = 0x20000000, // unused
127 PauseFrame = 0x00080000, // unused
128 MagicPacket = 0x00040000, // unused
129 WakeupFrame = 0x00020000, // unused
130 LinkChange = 0x00010000,
131 RxQEmpty = 0x00000080,
133 TxQ1Empty = 0x00000020, // unused
134 TxQ1Int = 0x00000010,
135 TxQ0Empty = 0x00000008, // unused
136 TxQ0Int = 0x00000004,
141 RxRES = 0x00200000, // unused
143 RxRUNT = 0x00100000, // unused
144 RxRWT = 0x00400000, // unused
148 CmdRxEnb = 0x08, // unused
150 RxBufEmpty = 0x01, // unused
153 Cfg9346_Lock = 0x00, // unused
154 Cfg9346_Unlock = 0xc0, // unused
157 AcceptErr = 0x20, // unused
158 AcceptRunt = 0x10, // unused
159 AcceptBroadcast = 0x0800,
160 AcceptMulticast = 0x0400,
161 AcceptMyPhys = 0x0200,
162 AcceptAllPhys = 0x0100,
166 RxCfgDMAShift = 8, // 0x1a in RxControl ?
169 TxInterFrameGapShift = 24,
170 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
180 LinkStatus = 0x02, // unused
181 FullDup = 0x01, // unused
184 TBILinkOK = 0x02000000, // unused
201 enum _DescStatusBit {
211 RxSizeMask = 0x0000ffff
/* Per-adapter private state, kept in the net_device private area
 * (view is elided — additional fields exist between the visible ones). */
214 struct sis190_private {
215 void __iomem *mmio_addr;	/* remapped MMIO register base, used by SIS_R*/SIS_W* */
216 struct pci_dev *pci_dev;	/* owning PCI device, used for DMA mapping */
217 struct net_device_stats stats;	/* counters returned by sis190_get_stats() */
226 struct RxDesc *RxDescRing;	/* Rx descriptor ring (NUM_RX_DESC entries) */
227 struct TxDesc *TxDescRing;	/* Tx descriptor ring (NUM_TX_DESC entries) */
228 struct sk_buff *Rx_skbuff[NUM_RX_DESC];	/* skb attached to each Rx slot */
229 struct sk_buff *Tx_skbuff[NUM_TX_DESC];	/* skb attached to each Tx slot */
230 struct work_struct phy_task;	/* deferred PHY/link handling (sis190_phy_task) */
231 struct timer_list timer;	/* periodic PHY poll timer (sis190_phy_timer) */
/* Chip description table, indexed via pci_device_id.driver_data in
 * sis190_init_one(). Only one entry is visible here. */
235 const static struct {
237 u8 version; /* depend on docs */
238 u32 RxConfigMask; /* clear the bits supported by this chip */
239 } sis_chip_info[] = {
240 { DRV_NAME, 0x00, 0xff7e1880, },
/* PCI IDs this driver binds to: SiS device 0x0190. */
243 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
244 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
248 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
/* Rx frames smaller than this are copied into a fresh skb instead of
 * handing the mapped buffer up (see sis190_try_rx_copy). Tunable. */
250 static int rx_copybreak = 200;
256 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
257 module_param(rx_copybreak, int, 0);
258 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
259 module_param_named(debug, debug.msg_enable, int, 0);
260 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
261 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
262 MODULE_VERSION(DRV_VERSION);
263 MODULE_LICENSE("GPL");
/* Interrupt sources enabled by sis190_hw_start() via the IntrMask register. */
265 static const u32 sis190_intr_mask =
266 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
269 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
270 * The chips use a 64 element hash table based on the Ethernet CRC.
272 static int multicast_filter_limit = 32;
/* Issue a raw command to the GMII control register and busy-poll (up to
 * 100 iterations) for the EhnMIInotDone bit to clear; logs on timeout. */
274 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
278 SIS_W32(GMIIControl, ctl);
282 for (i = 0; i < 100; i++) {
283 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
289 printk(KERN_ERR PFX "PHY command failed !\n");
/* Write @val to PHY register @reg through the enhanced MII interface.
 * NOTE(review): pmd is set in elided lines — presumably the PHY address. */
292 static void mdio_write(void __iomem *ioaddr, int reg, int val)
296 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
297 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
298 (((u32) val) << EhnMIIdataShift));
/* Read PHY register @reg; the 16-bit result sits in the upper half of
 * GMIIControl after the command completes. */
301 static int mdio_read(void __iomem *ioaddr, int reg)
305 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
306 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));
308 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
/* Read one 16-bit word at EEPROM offset @reg via the ROMInterface register.
 * Bails out early (elided path) if ROMControl bit 1 reports no EEPROM;
 * otherwise polls up to 200 times for the busy bit (0x0080) to clear and
 * returns the data from the upper 16 bits of ROMInterface. */
311 static int sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
317 if (!(SIS_R32(ROMControl) & 0x0002))
320 val = (0x0080 | (0x2 << 8) | (reg << 10));
322 SIS_W32(ROMInterface, val);
324 for (i = 0; i < 200; i++) {
325 if (!(SIS_R32(ROMInterface) & 0x0080))
330 data = (u16) ((SIS_R32(ROMInterface) & 0xffff0000) >> 16);
/* Mask every interrupt source and acknowledge all pending status bits. */
335 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
337 SIS_W32(IntrMask, 0x00);
338 SIS_W32(IntrStatus, 0xffffffff);
/* Quiesce the chip: halt Tx/Rx DMA, then mask and ack all interrupts. */
342 static void sis190_asic_down(void __iomem *ioaddr)
344 /* Stop the chip's Tx and Rx DMA processes. */
346 SIS_W32(TxControl, 0x1a00);
347 SIS_W32(RxControl, 0x1a00);
349 sis190_irq_mask_and_ack(ioaddr);
/* Set RingEnd in the size field so the NIC wraps back to the ring start. */
352 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
354 desc->size |= cpu_to_le32(RingEnd);
/* Hand an Rx descriptor back to the NIC: rewrite its buffer size while
 * preserving the RingEnd marker, then set OWN|INT so hardware may use it. */
357 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
359 u32 eor = le32_to_cpu(desc->size) & RingEnd;
362 desc->size = cpu_to_le32(rx_buf_sz | eor);
364 desc->status = cpu_to_le32(OWNbit | INTbit);
/* Store the DMA address of a freshly mapped buffer, then give the
 * descriptor to the hardware. */
367 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
370 desc->addr = cpu_to_le32(mapping);
371 sis190_give_to_asic(desc, rx_buf_sz);
/* Poison a descriptor so the NIC will not DMA into it: bogus address,
 * size cleared except for the RingEnd marker (status cleared in elided line).
 * NOTE(review): the 0xdeadbeef store lacks cpu_to_le32 — harmless as a
 * poison value, but worth confirming on big-endian. */
374 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
377 desc->addr = 0xdeadbeef;
378 desc->size &= cpu_to_le32(RingEnd);
/* Allocate an rx_buf_sz skb, DMA-map it from the device, and attach it to
 * @desc. On allocation failure (elided path) the descriptor is poisoned
 * via sis190_make_unusable_by_asic(). */
383 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
384 struct RxDesc *desc, u32 rx_buf_sz)
390 skb = dev_alloc_skb(rx_buf_sz);
396 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
399 sis190_map_to_asic(desc, mapping, rx_buf_sz);
405 sis190_make_unusable_by_asic(desc);
/* Refill empty Rx slots in the half-open window [start, end): slots that
 * still own an skb are skipped; the return value (elided) is used by
 * callers as the number of buffers actually allocated. */
409 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
414 for (cur = start; cur < end; cur++) {
415 int ret, i = cur % NUM_RX_DESC;
417 if (tp->Rx_skbuff[i])
420 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
421 tp->RxDescRing + i, tp->rx_buf_sz);
/* For small frames (< rx_copybreak), copy the payload into a fresh skb so
 * the original mapped buffer can be recycled to the NIC immediately. */
428 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
429 struct RxDesc *desc, int rx_buf_sz)
433 if (pkt_size < rx_copybreak) {
436 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
438 skb_reserve(skb, NET_IP_ALIGN);	/* keep IP header 4-byte aligned */
439 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
441 sis190_give_to_asic(desc, rx_buf_sz);	/* recycle original buffer */
/* Rx completion path: walk descriptors the NIC has released, drop CRC/pad
 * errors and fragments back to the hardware, deliver good frames up the
 * stack, then refill the ring via sis190_rx_fill(). */
448 static int sis190_rx_interrupt(struct net_device *dev,
449 struct sis190_private *tp, void __iomem *ioaddr)
451 struct net_device_stats *stats = &tp->stats;
452 u32 rx_left, cur_rx = tp->cur_rx;
455 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
456 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
458 for (; rx_left > 0; rx_left--, cur_rx++) {
459 unsigned int entry = cur_rx % NUM_RX_DESC;
460 struct RxDesc *desc = tp->RxDescRing + entry;
/* Descriptor still owned by the NIC: stop here (loop exit elided).
 * NOTE(review): no le32_to_cpu on status before the OWNbit test — confirm
 * this is safe on big-endian hosts. */
463 if (desc->status & OWNbit)
466 status = le32_to_cpu(desc->PSize);
468 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
471 if (status & RxCRC) {
472 net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
475 stats->rx_crc_errors++;
476 sis190_give_to_asic(desc, tp->rx_buf_sz);
477 } else if (!(status & PADbit)) {
478 net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
481 stats->rx_length_errors++;
482 sis190_give_to_asic(desc, tp->rx_buf_sz);
484 struct sk_buff *skb = tp->Rx_skbuff[entry];
485 int pkt_size = (status & RxSizeMask) - 4;	/* strip trailing CRC */
486 void (*pci_action)(struct pci_dev *, dma_addr_t,
487 size_t, int) = pci_dma_sync_single_for_device;
489 if (unlikely(pkt_size > tp->rx_buf_sz)) {
490 net_intr(tp, KERN_INFO
491 "%s: (frag) status = %08x.\n",
494 stats->rx_length_errors++;
495 sis190_give_to_asic(desc, tp->rx_buf_sz);
499 pci_dma_sync_single_for_cpu(tp->pci_dev,
500 le32_to_cpu(desc->addr), tp->rx_buf_sz,
/* If the frame was not copied (large packet), detach the skb and unmap
 * instead of just syncing the buffer back to the device. */
503 if (sis190_try_rx_copy(&skb, pkt_size, desc,
505 pci_action = pci_unmap_single;
506 tp->Rx_skbuff[entry] = NULL;
507 sis190_make_unusable_by_asic(desc);
510 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
511 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
514 skb_put(skb, pkt_size);
515 skb->protocol = eth_type_trans(skb, dev);
519 dev->last_rx = jiffies;
520 stats->rx_bytes += pkt_size;
524 count = cur_rx - tp->cur_rx;
527 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
528 if (!delta && count && netif_msg_intr(tp))
529 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
530 tp->dirty_rx += delta;
532 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
533 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
/* Unmap a transmitted skb (length padded up to ETH_ZLEN to match the
 * mapping made at xmit time) and wipe the descriptor. */
538 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
543 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
545 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
547 memset(desc, 0x00, sizeof(*desc));
/* Tx completion path: reclaim descriptors the NIC has finished with,
 * update stats, free the skbs, and wake the queue if it had been stopped
 * because the ring was full. */
550 static void sis190_tx_interrupt(struct net_device *dev,
551 struct sis190_private *tp, void __iomem *ioaddr)
553 u32 pending, dirty_tx = tp->dirty_tx;
555 * It would not be needed if queueing was allowed to be enabled
556 * again too early (hint: think preempt and unclocked smp systems).
558 unsigned int queue_stopped;
561 pending = tp->cur_tx - dirty_tx;
562 queue_stopped = (pending == NUM_TX_DESC);	/* ring completely full? */
564 for (; pending; pending--, dirty_tx++) {
565 unsigned int entry = dirty_tx % NUM_TX_DESC;
566 struct TxDesc *txd = tp->TxDescRing + entry;
/* Still owned by the NIC: nothing more to reclaim (loop exit elided). */
569 if (le32_to_cpu(txd->status) & OWNbit)
572 skb = tp->Tx_skbuff[entry];
574 tp->stats.tx_packets++;
575 tp->stats.tx_bytes += skb->len;
577 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
578 tp->Tx_skbuff[entry] = NULL;
579 dev_kfree_skb_irq(skb);
582 if (tp->dirty_tx != dirty_tx) {
583 tp->dirty_tx = dirty_tx;
586 netif_wake_queue(dev);
591 * The interrupt handler does all of the Rx thread work and cleans up after
/* Shared IRQ handler: reads and acks IntrStatus, shuts the chip down if
 * the device is not running, kicks the PHY work on LinkChange, and
 * dispatches to the Rx/Tx completion routines. */
594 static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
596 struct net_device *dev = __dev;
597 struct sis190_private *tp = netdev_priv(dev);
598 void __iomem *ioaddr = tp->mmio_addr;
599 unsigned int handled = 0;
602 status = SIS_R32(IntrStatus);
/* 0xffffffff: shared-IRQ read from a removed/absent device; 0: not ours. */
604 if ((status == 0xffffffff) || !status)
609 if (unlikely(!netif_running(dev))) {
610 sis190_asic_down(ioaddr);
614 SIS_W32(IntrStatus, status);	/* ack everything we saw */
616 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
618 if (status & LinkChange) {
619 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
620 schedule_work(&tp->phy_task);
624 sis190_rx_interrupt(dev, tp, ioaddr);
626 if (status & TxQ0Int)
627 sis190_tx_interrupt(dev, tp, ioaddr);
629 return IRQ_RETVAL(handled);
/* Release one Rx buffer: unmap its DMA mapping, free the skb, and poison
 * the descriptor so the NIC cannot reuse it. */
632 static void sis190_free_rx_skb(struct sis190_private *tp,
633 struct sk_buff **sk_buff, struct RxDesc *desc)
635 struct pci_dev *pdev = tp->pci_dev;
637 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
639 dev_kfree_skb(*sk_buff);
641 sis190_make_unusable_by_asic(desc);
/* Free every allocated Rx buffer in the ring (empty slots are skipped). */
644 static void sis190_rx_clear(struct sis190_private *tp)
648 for (i = 0; i < NUM_RX_DESC; i++) {
649 if (!tp->Rx_skbuff[i])
651 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
/* Reset all ring producer/consumer indexes to zero. */
655 static void sis190_init_ring_indexes(struct sis190_private *tp)
657 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/* Initialize both rings: reset indexes, clear the skb bookkeeping arrays,
 * fully populate the Rx ring, and mark its last descriptor with RingEnd.
 * The error path when the fill comes up short is elided. */
660 static int sis190_init_ring(struct net_device *dev)
662 struct sis190_private *tp = netdev_priv(dev);
664 sis190_init_ring_indexes(tp);
666 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
667 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
669 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
672 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
/* Program the Rx filter: promiscuous, all-multicast (when the list exceeds
 * multicast_filter_limit), or a 64-bit CRC-based multicast hash. Registers
 * are written under tp->lock with IRQs disabled. */
681 static void sis190_set_rx_mode(struct net_device *dev)
683 struct sis190_private *tp = netdev_priv(dev);
684 void __iomem *ioaddr = tp->mmio_addr;
686 u32 mc_filter[2]; /* Multicast hash filter */
689 if (dev->flags & IFF_PROMISC) {
690 /* Unconditionally log net taps. */
691 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
694 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
696 mc_filter[1] = mc_filter[0] = 0xffffffff;
697 } else if ((dev->mc_count > multicast_filter_limit) ||
698 (dev->flags & IFF_ALLMULTI)) {
699 /* Too many to filter perfectly -- accept all multicasts. */
700 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
701 mc_filter[1] = mc_filter[0] = 0xffffffff;
703 struct dev_mc_list *mclist;
706 rx_mode = AcceptBroadcast | AcceptMyPhys;
707 mc_filter[1] = mc_filter[0] = 0;
708 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
709 i++, mclist = mclist->next) {
/* Top 6 bits of the Ethernet CRC select one of 64 hash buckets. */
711 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
712 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
713 rx_mode |= AcceptMulticast;
717 spin_lock_irqsave(&tp->lock, flags);
719 SIS_W16(RxMacControl, rx_mode | 0x2);
720 SIS_W32(RxHashTable, mc_filter[0]);
721 SIS_W32(RxHashTable + 4, mc_filter[1]);
723 spin_unlock_irqrestore(&tp->lock, flags);
/* Soft-reset the chip via IntrControl bit 15, then bring the ASIC down. */
726 static void sis190_soft_reset(void __iomem *ioaddr)
728 SIS_W32(IntrControl, 0x8000);
731 SIS_W32(IntrControl, 0x0);
732 sis190_asic_down(ioaddr);
/* Full hardware bring-up: reset, program ring base addresses, set MAC
 * defaults, install the Rx filter, unmask interrupts, and enable Tx/Rx. */
736 static void sis190_hw_start(struct net_device *dev)
738 struct sis190_private *tp = netdev_priv(dev);
739 void __iomem *ioaddr = tp->mmio_addr;
741 sis190_soft_reset(ioaddr);
743 SIS_W32(TxDescStartAddr, tp->tx_dma);
744 SIS_W32(RxDescStartAddr, tp->rx_dma);
746 SIS_W32(IntrStatus, 0xffffffff);	/* clear anything pending */
747 SIS_W32(IntrMask, 0x0);
749 * Default is 100Mbps.
750 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
752 SIS_W16(StationControl, 0x1901);
753 SIS_W32(GMIIControl, 0x0);
754 SIS_W32(TxMacControl, 0x60);
755 SIS_W16(RxMacControl, 0x02);
756 SIS_W32(RxHashTable, 0x0);
758 SIS_W32(RxWakeOnLan, 0x0);	/* disable wake-on-LAN */
763 sis190_set_rx_mode(dev);
765 /* Enable all known interrupts by setting the interrupt mask. */
766 SIS_W32(IntrMask, sis190_intr_mask);
768 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
769 SIS_W32(RxControl, 0x1a1d);
771 netif_start_queue(dev);
/* Deferred PHY/link work: wait out a PHY reset, force a reset until
 * autonegotiation completes, then decode the link-partner ability (LPA)
 * against the reg31 table to program StationControl and report carrier. */
774 static void sis190_phy_task(void * data)
776 struct net_device *dev = data;
777 struct sis190_private *tp = netdev_priv(dev);
778 void __iomem *ioaddr = tp->mmio_addr;
781 val = mdio_read(ioaddr, MII_BMCR);
782 if (val & BMCR_RESET) {
783 // FIXME: needlessly high ? -- FR 02/07/2005
784 mod_timer(&tp->timer, jiffies + HZ/10);
785 } else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
786 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
788 mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
789 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
/* LPA-bits -> { StationControl value, human-readable mode } table
 * (declaration and some entries elided); last entry is a catch-all. */
797 { LPA_1000XFULL | LPA_SLCT,
798 "1000 Mbps Full Duplex",
800 { LPA_1000XHALF | LPA_SLCT,
801 "1000 Mbps Half Duplex",
804 "100 Mbps Full Duplex",
807 "100 Mbps Half Duplex",
810 "10 Mbps Full Duplex",
813 "10 Mbps Half Duplex",
815 { 0, "unknown", 0x0000 }
818 val = mdio_read(ioaddr, 0x1f);
819 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
821 val = mdio_read(ioaddr, MII_LPA);
822 net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);
824 for (p = reg31; p->ctl; p++) {
825 if ((val & p->val) == p->val)
829 SIS_W16(StationControl, p->ctl);
830 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
832 netif_carrier_on(dev);
/* Timer callback: defer PHY polling to process context via the workqueue. */
836 static void sis190_phy_timer(unsigned long __opaque)
838 struct net_device *dev = (struct net_device *)__opaque;
839 struct sis190_private *tp = netdev_priv(dev);
841 if (likely(netif_running(dev)))
842 schedule_work(&tp->phy_task);
/* Stop the PHY poll timer, waiting for a running callback to finish. */
845 static inline void sis190_delete_timer(struct net_device *dev)
847 struct sis190_private *tp = netdev_priv(dev);
849 del_timer_sync(&tp->timer);
/* Arm the PHY poll timer to fire after SIS190_PHY_TIMEOUT, passing the
 * net_device as the callback argument. */
852 static inline void sis190_request_timer(struct net_device *dev)
854 struct sis190_private *tp = netdev_priv(dev);
855 struct timer_list *timer = &tp->timer;
858 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
859 timer->data = (unsigned long)dev;
860 timer->function = sis190_phy_timer;
/* Derive the Rx buffer size from the MTU: the default RX_BUF_SIZE unless
 * the MTU is larger, in which case add room for the header plus slack. */
864 static void sis190_set_rxbufsize(struct sis190_private *tp,
865 struct net_device *dev)
867 unsigned int mtu = dev->mtu;
869 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
/* ndo_open: allocate DMA-coherent Tx/Rx rings, build the rings, start the
 * PHY timer, request the (shared) IRQ, and start the hardware. The elided
 * labels at the end form the usual goto-unwind error path. */
872 static int sis190_open(struct net_device *dev)
874 struct sis190_private *tp = netdev_priv(dev);
875 struct pci_dev *pdev = tp->pci_dev;
878 sis190_set_rxbufsize(tp, dev);
881 * Rx and Tx descriptors need 256 bytes alignment.
882 * pci_alloc_consistent() guarantees a stronger alignment.
884 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
888 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
892 rc = sis190_init_ring(dev);
896 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
898 sis190_request_timer(dev);
900 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
902 goto err_release_timer_2;
904 sis190_hw_start(dev);
/* Error unwind (labels elided): stop timer, free rings. */
909 sis190_delete_timer(dev);
912 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
915 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
/* Drop every pending Tx buffer (unmap + free), counting each as a dropped
 * packet, then reset the Tx ring indexes. */
920 static void sis190_tx_clear(struct sis190_private *tp)
924 for (i = 0; i < NUM_TX_DESC; i++) {
925 struct sk_buff *skb = tp->Tx_skbuff[i];
930 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
931 tp->Tx_skbuff[i] = NULL;
934 tp->stats.tx_dropped++;
936 tp->cur_tx = tp->dirty_tx = 0;
/* Quiesce the interface for close: stop the timer and queue, flush the PHY
 * work, bring the ASIC down under the lock, and wait out in-flight IRQ /
 * poll activity (loop termination condition: IntrMask reads back zero). */
939 static void sis190_down(struct net_device *dev)
941 struct sis190_private *tp = netdev_priv(dev);
942 void __iomem *ioaddr = tp->mmio_addr;
943 unsigned int poll_locked = 0;
945 sis190_delete_timer(dev);
947 netif_stop_queue(dev);
949 flush_scheduled_work();	/* ensure phy_task is not running */
952 spin_lock_irq(&tp->lock);
954 sis190_asic_down(ioaddr);
956 spin_unlock_irq(&tp->lock);
958 synchronize_irq(dev->irq);
961 netif_poll_disable(dev);
967 } while (SIS_R32(IntrMask));
/* ndo_stop: tear down the interface and release IRQ and ring memory.
 * (The calls that clear the rings before freeing are elided.) */
973 static int sis190_close(struct net_device *dev)
975 struct sis190_private *tp = netdev_priv(dev);
976 struct pci_dev *pdev = tp->pci_dev;
980 free_irq(dev->irq, dev);
982 netif_poll_enable(dev);
984 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
985 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
987 tp->TxDescRing = NULL;
988 tp->RxDescRing = NULL;
/* hard_start_xmit: pad runts to ETH_ZLEN, claim the next Tx descriptor,
 * DMA-map the payload, hand the descriptor to the NIC (OWN|INT|DEF|CRC|PAD)
 * and kick TxControl. Stops the queue when the ring fills; a completion
 * racing in between may re-wake it immediately. */
993 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
995 struct sis190_private *tp = netdev_priv(dev);
996 void __iomem *ioaddr = tp->mmio_addr;
997 u32 len, entry, dirty_tx;
1001 if (unlikely(skb->len < ETH_ZLEN)) {
1002 skb = skb_padto(skb, ETH_ZLEN);	/* NULL on alloc failure (elided check) */
1004 tp->stats.tx_dropped++;
1012 entry = tp->cur_tx % NUM_TX_DESC;
1013 desc = tp->TxDescRing + entry;
1015 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1016 netif_stop_queue(dev);
1017 net_tx_err(tp, KERN_ERR PFX
1018 "%s: BUG! Tx Ring full when queue awake!\n",
1020 return NETDEV_TX_BUSY;
1023 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1025 tp->Tx_skbuff[entry] = skb;
1027 desc->PSize = cpu_to_le32(len);
1028 desc->addr = cpu_to_le32(mapping);
1030 desc->size = cpu_to_le32(len);
1031 if (entry == (NUM_TX_DESC - 1))
1032 desc->size |= cpu_to_le32(RingEnd);	/* keep ring wrap marker */
1036 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1042 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);	/* poke Tx DMA */
1044 dev->trans_start = jiffies;
1046 dirty_tx = tp->dirty_tx;
1047 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1048 netif_stop_queue(dev);
/* Re-check: a Tx completion may have advanced dirty_tx meanwhile. */
1050 if (dirty_tx != tp->dirty_tx)
1051 netif_wake_queue(dev);
1054 return NETDEV_TX_OK;
/* ndo_get_stats: return the driver-maintained counters (body elided). */
1057 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1059 struct sis190_private *tp = netdev_priv(dev);
/* Undo sis190_init_board(): unmap MMIO, release PCI regions, disable the
 * device (the net_device free is elided). */
1064 static void sis190_release_board(struct pci_dev *pdev)
1066 struct net_device *dev = pci_get_drvdata(pdev);
1067 struct sis190_private *tp = netdev_priv(dev);
1069 iounmap(tp->mmio_addr);
1070 pci_release_regions(pdev);
1071 pci_disable_device(pdev);
/* Probe-time board setup: allocate the net_device, enable the PCI device,
 * validate and claim BAR0 as MMIO, set the 32-bit DMA mask, remap the
 * registers, and quiesce the chip. Numbered goto labels (elided) unwind
 * in reverse order on failure. */
1075 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1077 struct sis190_private *tp;
1078 struct net_device *dev;
1079 void __iomem *ioaddr;
1082 dev = alloc_etherdev(sizeof(*tp));
1084 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1089 SET_MODULE_OWNER(dev);
1090 SET_NETDEV_DEV(dev, &pdev->dev);
1092 tp = netdev_priv(dev);
1093 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1095 rc = pci_enable_device(pdev);
1097 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1098 goto err_free_dev_1;
1103 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1104 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1106 goto err_pci_disable_2;
1108 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1109 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1111 goto err_pci_disable_2;
1114 rc = pci_request_regions(pdev, DRV_NAME);
1116 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1118 goto err_pci_disable_2;
1121 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);	/* 32-bit descriptors only */
1123 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1125 goto err_free_res_3;
1128 pci_set_master(pdev);
1130 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1132 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1135 goto err_free_res_3;
1139 tp->mmio_addr = ioaddr;
1141 sis190_irq_mask_and_ack(ioaddr);
1143 sis190_soft_reset(ioaddr);
/* Error unwind (labels elided). */
1148 pci_release_regions(pdev);
1150 pci_disable_device(pdev);
/* ndo_tx_timeout: disable Tx and interrupts, flush the Tx ring under the
 * lock (guards against a shared-IRQ handler reclaiming concurrently), then
 * restart the hardware and wake the queue. */
1158 static void sis190_tx_timeout(struct net_device *dev)
1160 struct sis190_private *tp = netdev_priv(dev);
1161 void __iomem *ioaddr = tp->mmio_addr;
1164 /* Disable Tx, if not already */
1165 tmp8 = SIS_R8(TxControl);
1166 if (tmp8 & CmdTxEnb)
1167 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1169 /* Disable interrupts by clearing the interrupt mask. */
1170 SIS_W32(IntrMask, 0x0000);
1172 /* Stop a shared interrupt from scavenging while we are. */
1173 spin_lock_irq(&tp->lock);
1174 sis190_tx_clear(tp);
1175 spin_unlock_irq(&tp->lock);
1177 /* ...and finally, reset everything. */
1178 sis190_hw_start(dev);
1180 netif_wake_queue(dev);
/* Advertise 10/100 full+half and 1000 full, then reset the PHY with
 * autonegotiation enabled and restarted. */
1183 static void sis190_set_speed_auto(struct net_device *dev)
1185 struct sis190_private *tp = netdev_priv(dev);
1186 void __iomem *ioaddr = tp->mmio_addr;
1189 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1191 val = mdio_read(ioaddr, MII_ADVERTISE);
1193 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1195 mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1196 ADVERTISE_100FULL | ADVERTISE_10FULL |
1197 ADVERTISE_100HALF | ADVERTISE_10HALF);
1199 // Enable 1000 Full Mode.
1200 mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);
1202 // Enable auto-negotiation and restart auto-negotiation.
1203 mdio_write(ioaddr, MII_BMCR,
1204 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
/* ethtool get_drvinfo: report driver name, version and PCI bus location. */
1207 static void sis190_get_drvinfo(struct net_device *dev,
1208 struct ethtool_drvinfo *info)
1210 struct sis190_private *tp = netdev_priv(dev);
1212 strcpy(info->driver, DRV_NAME);
1213 strcpy(info->version, DRV_VERSION);
1214 strcpy(info->bus_info, pci_name(tp->pci_dev));
/* ethtool get_regs_len: size of the register window dumped by get_regs. */
1217 static int sis190_get_regs_len(struct net_device *dev)
1219 return SIS190_REGS_SIZE;
/* ethtool get_regs: snapshot the MMIO register window into @p, clamped to
 * SIS190_REGS_SIZE and taken atomically under the device lock. */
1222 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1225 struct sis190_private *tp = netdev_priv(dev);
1226 unsigned long flags;
1228 if (regs->len > SIS190_REGS_SIZE)
1229 regs->len = SIS190_REGS_SIZE;
1231 spin_lock_irqsave(&tp->lock, flags);
1232 memcpy_fromio(p, tp->mmio_addr, regs->len);
1233 spin_unlock_irqrestore(&tp->lock, flags);
/* ethtool get_msglevel: current netif message-enable bitmap. */
1236 static u32 sis190_get_msglevel(struct net_device *dev)
1238 struct sis190_private *tp = netdev_priv(dev);
1240 return tp->msg_enable;
/* ethtool set_msglevel: replace the netif message-enable bitmap. */
1243 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1245 struct sis190_private *tp = netdev_priv(dev);
1247 tp->msg_enable = value;
/* ethtool operations table installed via SET_ETHTOOL_OPS in probe. */
1250 static struct ethtool_ops sis190_ethtool_ops = {
1251 .get_drvinfo = sis190_get_drvinfo,
1252 .get_regs_len = sis190_get_regs_len,
1253 .get_regs = sis190_get_regs,
1254 .get_link = ethtool_op_get_link,
1255 .get_msglevel = sis190_get_msglevel,
1256 .set_msglevel = sis190_set_msglevel,
/* PCI probe: set up the board, read the MAC address (EEPROM if ROMControl
 * bit 2 says one is present, otherwise the chip's RxMacAddr registers),
 * wire up the net_device ops, register it, and kick off autonegotiation. */
1259 static int __devinit sis190_init_one(struct pci_dev *pdev,
1260 const struct pci_device_id *ent)
1262 static int printed_version = 0;
1263 struct sis190_private *tp;
1264 struct net_device *dev;
1265 void __iomem *ioaddr;
1268 if (!printed_version) {
1269 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1270 printed_version = 1;
1273 dev = sis190_init_board(pdev);
1279 tp = netdev_priv(dev);
1280 ioaddr = tp->mmio_addr;
1282 /* Get MAC address */
1283 /* Read node address from the EEPROM */
1285 if (SIS_R32(ROMControl) & 0x4) {
1286 for (i = 0; i < 3; i++) {
1287 SIS_W16(RxMacAddr + 2*i,
1288 sis190_read_eeprom(ioaddr, 3 + i));
/* Either way, the effective MAC ends up in the RxMacAddr registers. */
1292 for (i = 0; i < MAC_ADDR_LEN; i++)
1293 dev->dev_addr[i] = SIS_R8(RxMacAddr + i);
1295 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1297 dev->open = sis190_open;
1298 dev->stop = sis190_close;
1299 dev->get_stats = sis190_get_stats;
1300 dev->tx_timeout = sis190_tx_timeout;
1301 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1302 dev->hard_start_xmit = sis190_start_xmit;
1303 dev->set_multicast_list = sis190_set_rx_mode;
1304 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1305 dev->irq = pdev->irq;
1306 dev->base_addr = (unsigned long) 0xdead;	/* placeholder, not a real ioport */
1308 spin_lock_init(&tp->lock);
1309 rc = register_netdev(dev);
1311 sis190_release_board(pdev);
1315 pci_set_drvdata(pdev, dev);
1317 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1318 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1319 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1321 dev->dev_addr[0], dev->dev_addr[1],
1322 dev->dev_addr[2], dev->dev_addr[3],
1323 dev->dev_addr[4], dev->dev_addr[5]);
1325 netif_carrier_off(dev);	/* no link until the PHY task says so */
1327 sis190_set_speed_auto(dev);
/* PCI remove: unregister the net_device, release board resources, and
 * clear the driver data pointer. */
1332 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1334 struct net_device *dev = pci_get_drvdata(pdev);
1336 unregister_netdev(dev);
1337 sis190_release_board(pdev);
1338 pci_set_drvdata(pdev, NULL);
/* PCI driver glue binding the ID table to probe/remove. */
1341 static struct pci_driver sis190_pci_driver = {
1343 .id_table = sis190_pci_tbl,
1344 .probe = sis190_init_one,
1345 .remove = __devexit_p(sis190_remove_one),
/* Module entry: register the PCI driver. */
1348 static int __init sis190_init_module(void)
1350 return pci_module_init(&sis190_pci_driver);
/* Module exit: unregister the PCI driver. */
1353 static void __exit sis190_cleanup_module(void)
1355 pci_unregister_driver(&sis190_pci_driver);
1358 module_init(sis190_init_module);
1359 module_exit(sis190_cleanup_module);