2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
50 #ifdef CONFIG_SIS190_NAPI
51 #define NAPI_SUFFIX "-NAPI"
53 #define NAPI_SUFFIX ""
56 #define DRV_VERSION "1.2" NAPI_SUFFIX
57 #define DRV_NAME "sis190"
58 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
59 #define PFX DRV_NAME ": "
61 #ifdef CONFIG_SIS190_NAPI
62 #define sis190_rx_skb netif_receive_skb
63 #define sis190_rx_quota(count, quota) min(count, quota)
65 #define sis190_rx_skb netif_rx
66 #define sis190_rx_quota(count, quota) count
69 #define MAC_ADDR_LEN 6
71 #define NUM_TX_DESC 64 /* [8..1024] */
72 #define NUM_RX_DESC 64 /* [8..8192] */
73 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
74 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
75 #define RX_BUF_SIZE 1536
76 #define RX_BUF_MASK 0xfff8
78 #define SIS190_REGS_SIZE 0x80
79 #define SIS190_TX_TIMEOUT (6*HZ)
80 #define SIS190_PHY_TIMEOUT (10*HZ)
81 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
82 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
85 /* Enhanced PHY access register bit definitions */
86 #define EhnMIIread 0x0000
87 #define EhnMIIwrite 0x0020
88 #define EhnMIIdataShift 16
89 #define EhnMIIpmdShift 6 /* 7016 only */
90 #define EhnMIIregShift 11
91 #define EhnMIIreq 0x0010
92 #define EhnMIInotDone 0x0010
94 /* Write/read MMIO register */
95 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
96 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
97 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
98 #define SIS_R8(reg) readb (ioaddr + (reg))
99 #define SIS_R16(reg) readw (ioaddr + (reg))
100 #define SIS_R32(reg) readl (ioaddr + (reg))
102 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
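/*
 * SIS_PCI_COMMIT() performs a dummy read of IntrControl; the read-back is
 * presumably there to flush posted MMIO writes before continuing.
 */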
104 enum sis190_registers {
106 TxDescStartAddr = 0x04,
107 rsv0 = 0x08, // reserved
108 TxSts = 0x0c, // unused (Control/Status)
110 RxDescStartAddr = 0x14,
111 rsv1 = 0x18, // reserved
112 RxSts = 0x1c, // unused
116 IntrTimer = 0x2c, // unused (Interrupt Timer)
117 PMControl = 0x30, // unused (Power Mgmt Control/Status)
118 rsv2 = 0x34, // reserved
121 StationControl = 0x40,
123 GIoCR = 0x48, // unused (GMAC IO Compensation)
124 GIoCtrl = 0x4c, // unused (GMAC IO Control)
126 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
127 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
128 rsv3 = 0x5c, // reserved
132 // Undocumented = 0x6c,
134 RxWolData = 0x74, // unused (Rx WOL Data Access)
135 RxMPSControl = 0x78, // unused (Rx MPS Control)
136 rsv4 = 0x7c, // reserved
139 enum sis190_register_content {
141 SoftInt = 0x40000000, // unused
142 Timeup = 0x20000000, // unused
143 PauseFrame = 0x00080000, // unused
144 MagicPacket = 0x00040000, // unused
145 WakeupFrame = 0x00020000, // unused
146 LinkChange = 0x00010000,
147 RxQEmpty = 0x00000080,
149 TxQ1Empty = 0x00000020, // unused
150 TxQ1Int = 0x00000010,
151 TxQ0Empty = 0x00000008, // unused
152 TxQ0Int = 0x00000004,
158 CmdRxEnb = 0x08, // unused
160 RxBufEmpty = 0x01, // unused
163 Cfg9346_Lock = 0x00, // unused
164 Cfg9346_Unlock = 0xc0, // unused
167 AcceptErr = 0x20, // unused
168 AcceptRunt = 0x10, // unused
169 AcceptBroadcast = 0x0800,
170 AcceptMulticast = 0x0400,
171 AcceptMyPhys = 0x0200,
172 AcceptAllPhys = 0x0100,
176 RxCfgDMAShift = 8, // 0x1a in RxControl ?
179 TxInterFrameGapShift = 24,
180 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
182 LinkStatus = 0x02, // unused
183 FullDup = 0x01, // unused
186 TBILinkOK = 0x02000000, // unused
203 enum _DescStatusBit {
205 OWNbit = 0x80000000, // RXOWN/TXOWN
206 INTbit = 0x40000000, // RXINT/TXINT
207 CRCbit = 0x00020000, // CRCOFF/CRCEN
208 PADbit = 0x00010000, // PREADD/PADEN
210 RingEnd = 0x80000000,
212 LSEN = 0x08000000, // TSO ? -- FR
239 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
248 RxSizeMask = 0x0000ffff
250 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
251 * provide two (unused with Linux) Tx queues. No publicly
252 * available documentation, alas.
256 enum sis190_eeprom_access_register_bits {
257 EECS = 0x00000001, // unused
258 EECLK = 0x00000002, // unused
259 EEDO = 0x00000008, // unused
260 EEDI = 0x00000004, // unused
263 EEWOP = 0x00000100 // unused
266 /* EEPROM Addresses */
267 enum sis190_eeprom_address {
268 EEPROMSignature = 0x00,
269 EEPROMCLK = 0x01, // unused
274 enum sis190_feature {
280 struct sis190_private {
281 void __iomem *mmio_addr;
282 struct pci_dev *pci_dev;
283 struct net_device_stats stats;
292 struct RxDesc *RxDescRing;
293 struct TxDesc *TxDescRing;
294 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
295 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
296 struct work_struct phy_task;
297 struct timer_list timer;
299 struct mii_if_info mii_if;
300 struct list_head first_phy;
305 struct list_head list;
312 enum sis190_phy_type {
319 static struct mii_chip_info {
324 } mii_chip_table[] = {
325 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
326 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
327 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
328 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
332 static const struct {
334 } sis_chip_info[] = {
335 { "SiS 190 PCI Fast Ethernet adapter" },
336 { "SiS 191 PCI Gigabit Ethernet adapter" },
339 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
340 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
341 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
345 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
347 static int rx_copybreak = 200;
353 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
354 module_param(rx_copybreak, int, 0);
355 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
356 module_param_named(debug, debug.msg_enable, int, 0);
357 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
358 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
359 MODULE_VERSION(DRV_VERSION);
360 MODULE_LICENSE("GPL");
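/*
 * Both parameters can be set at module load time, for instance
 * (illustrative): modprobe sis190 rx_copybreak=256 debug=5
 * where "debug" maps onto debug.msg_enable via module_param_named() above.
 */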
362 static const u32 sis190_intr_mask =
363 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
366 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
367 * The chips use a 64 element hash table based on the Ethernet CRC.
369 static const int multicast_filter_limit = 32;
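/*
 * At or below this limit the driver builds a 64-bit hash filter instead of
 * accepting all multicast traffic: each address is reduced to a 6-bit
 * index with ether_crc(ETH_ALEN, addr) & 0x3f and the matching bit is set
 * in the two 32-bit RxHashTable registers (see sis190_set_rx_mode()).
 */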
371 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
375 SIS_W32(GMIIControl, ctl);
379 for (i = 0; i < 100; i++) {
380 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
386 printk(KERN_ERR PFX "PHY command failed !\n");
389 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
391 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
392 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
393 (((u32) val) << EhnMIIdataShift));
396 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
398 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
399 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
401 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
404 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
406 struct sis190_private *tp = netdev_priv(dev);
408 mdio_write(tp->mmio_addr, phy_id, reg, val);
411 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
413 struct sis190_private *tp = netdev_priv(dev);
415 return mdio_read(tp->mmio_addr, phy_id, reg);
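/*
 * Some MII status bits (e.g. the link bit in BMSR) are latched by the PHY
 * and only refreshed on read; mdio_read_latched() therefore reads the
 * register twice and returns the second, current value.
 */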
418 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
420 mdio_read(ioaddr, phy_id, reg);
421 return mdio_read(ioaddr, phy_id, reg);
424 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
429 if (!(SIS_R32(ROMControl) & 0x0002))
432 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
434 for (i = 0; i < 200; i++) {
435 if (!(SIS_R32(ROMInterface) & EEREQ)) {
436 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
445 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
447 SIS_W32(IntrMask, 0x00);
448 SIS_W32(IntrStatus, 0xffffffff);
452 static void sis190_asic_down(void __iomem *ioaddr)
454 /* Stop the chip's Tx and Rx DMA processes. */
456 SIS_W32(TxControl, 0x1a00);
457 SIS_W32(RxControl, 0x1a00);
459 sis190_irq_mask_and_ack(ioaddr);
462 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
464 desc->size |= cpu_to_le32(RingEnd);
467 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
469 u32 eor = le32_to_cpu(desc->size) & RingEnd;
472 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
474 desc->status = cpu_to_le32(OWNbit | INTbit);
477 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
480 desc->addr = cpu_to_le32(mapping);
481 sis190_give_to_asic(desc, rx_buf_sz);
484 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
487 desc->addr = 0xdeadbeef;
488 desc->size &= cpu_to_le32(RingEnd);
493 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
494 struct RxDesc *desc, u32 rx_buf_sz)
500 skb = dev_alloc_skb(rx_buf_sz);
506 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
509 sis190_map_to_asic(desc, mapping, rx_buf_sz);
515 sis190_make_unusable_by_asic(desc);
519 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
524 for (cur = start; cur < end; cur++) {
525 int ret, i = cur % NUM_RX_DESC;
527 if (tp->Rx_skbuff[i])
530 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
531 tp->RxDescRing + i, tp->rx_buf_sz);
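/*
 * Receive copybreak: packets shorter than the rx_copybreak module
 * parameter are copied into a small freshly allocated skb so the original
 * full-sized receive buffer can be handed straight back to the chip;
 * larger packets are passed up as-is and their ring slot is refilled later.
 */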
538 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
539 struct RxDesc *desc, int rx_buf_sz)
543 if (pkt_size < rx_copybreak) {
546 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
548 skb_reserve(skb, NET_IP_ALIGN);
549 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
551 sis190_give_to_asic(desc, rx_buf_sz);
558 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
560 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
562 if ((status & CRCOK) && !(status & ErrMask))
565 if (!(status & CRCOK))
566 stats->rx_crc_errors++;
567 else if (status & OVRUN)
568 stats->rx_over_errors++;
569 else if (status & (SHORT | LIMIT))
570 stats->rx_length_errors++;
571 else if (status & (MIIER | NIBON | COLON))
572 stats->rx_frame_errors++;
578 static int sis190_rx_interrupt(struct net_device *dev,
579 struct sis190_private *tp, void __iomem *ioaddr)
581 struct net_device_stats *stats = &tp->stats;
582 u32 rx_left, cur_rx = tp->cur_rx;
585 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
586 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
588 for (; rx_left > 0; rx_left--, cur_rx++) {
589 unsigned int entry = cur_rx % NUM_RX_DESC;
590 struct RxDesc *desc = tp->RxDescRing + entry;
593 if (desc->status & OWNbit)
596 status = le32_to_cpu(desc->PSize);
598 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
601 if (sis190_rx_pkt_err(status, stats) < 0)
602 sis190_give_to_asic(desc, tp->rx_buf_sz);
604 struct sk_buff *skb = tp->Rx_skbuff[entry];
605 int pkt_size = (status & RxSizeMask) - 4;
606 void (*pci_action)(struct pci_dev *, dma_addr_t,
607 size_t, int) = pci_dma_sync_single_for_device;
609 if (unlikely(pkt_size > tp->rx_buf_sz)) {
610 net_intr(tp, KERN_INFO
611 "%s: (frag) status = %08x.\n",
614 stats->rx_length_errors++;
615 sis190_give_to_asic(desc, tp->rx_buf_sz);
619 pci_dma_sync_single_for_cpu(tp->pci_dev,
620 le32_to_cpu(desc->addr), tp->rx_buf_sz,
623 if (sis190_try_rx_copy(&skb, pkt_size, desc,
625 pci_action = pci_unmap_single;
626 tp->Rx_skbuff[entry] = NULL;
627 sis190_make_unusable_by_asic(desc);
630 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
631 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
634 skb_put(skb, pkt_size);
635 skb->protocol = eth_type_trans(skb, dev);
639 dev->last_rx = jiffies;
641 stats->rx_bytes += pkt_size;
642 if ((status & BCAST) == MCAST)
646 count = cur_rx - tp->cur_rx;
649 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
650 if (!delta && count && netif_msg_intr(tp))
651 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
652 tp->dirty_rx += delta;
654 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
655 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
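/*
 * cur_rx counts descriptors the driver has processed while dirty_rx counts
 * ring slots refilled with a fresh skb; if refilling falls a full ring
 * behind, the chip is left without receive buffers and the warning above
 * is printed.
 */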
660 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
665 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
667 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
669 memset(desc, 0x00, sizeof(*desc));
672 static void sis190_tx_interrupt(struct net_device *dev,
673 struct sis190_private *tp, void __iomem *ioaddr)
675 u32 pending, dirty_tx = tp->dirty_tx;
677 * It would not be needed if queueing was allowed to be enabled
678 * again too early (hint: think preempt and unclocked smp systems).
680 unsigned int queue_stopped;
683 pending = tp->cur_tx - dirty_tx;
684 queue_stopped = (pending == NUM_TX_DESC);
686 for (; pending; pending--, dirty_tx++) {
687 unsigned int entry = dirty_tx % NUM_TX_DESC;
688 struct TxDesc *txd = tp->TxDescRing + entry;
691 if (le32_to_cpu(txd->status) & OWNbit)
694 skb = tp->Tx_skbuff[entry];
696 tp->stats.tx_packets++;
697 tp->stats.tx_bytes += skb->len;
699 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
700 tp->Tx_skbuff[entry] = NULL;
701 dev_kfree_skb_irq(skb);
704 if (tp->dirty_tx != dirty_tx) {
705 tp->dirty_tx = dirty_tx;
708 netif_wake_queue(dev);
713 * The interrupt handler does all of the Rx thread work and cleans up after
716 static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
718 struct net_device *dev = __dev;
719 struct sis190_private *tp = netdev_priv(dev);
720 void __iomem *ioaddr = tp->mmio_addr;
721 unsigned int handled = 0;
724 status = SIS_R32(IntrStatus);
726 if ((status == 0xffffffff) || !status)
731 if (unlikely(!netif_running(dev))) {
732 sis190_asic_down(ioaddr);
736 SIS_W32(IntrStatus, status);
738 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
740 if (status & LinkChange) {
741 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
742 schedule_work(&tp->phy_task);
746 sis190_rx_interrupt(dev, tp, ioaddr);
748 if (status & TxQ0Int)
749 sis190_tx_interrupt(dev, tp, ioaddr);
751 return IRQ_RETVAL(handled);
754 #ifdef CONFIG_NET_POLL_CONTROLLER
755 static void sis190_netpoll(struct net_device *dev)
757 struct sis190_private *tp = netdev_priv(dev);
758 struct pci_dev *pdev = tp->pci_dev;
760 disable_irq(pdev->irq);
761 sis190_interrupt(pdev->irq, dev, NULL);
762 enable_irq(pdev->irq);
766 static void sis190_free_rx_skb(struct sis190_private *tp,
767 struct sk_buff **sk_buff, struct RxDesc *desc)
769 struct pci_dev *pdev = tp->pci_dev;
771 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
773 dev_kfree_skb(*sk_buff);
775 sis190_make_unusable_by_asic(desc);
778 static void sis190_rx_clear(struct sis190_private *tp)
782 for (i = 0; i < NUM_RX_DESC; i++) {
783 if (!tp->Rx_skbuff[i])
785 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
789 static void sis190_init_ring_indexes(struct sis190_private *tp)
791 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
794 static int sis190_init_ring(struct net_device *dev)
796 struct sis190_private *tp = netdev_priv(dev);
798 sis190_init_ring_indexes(tp);
800 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
801 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
803 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
806 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
815 static void sis190_set_rx_mode(struct net_device *dev)
817 struct sis190_private *tp = netdev_priv(dev);
818 void __iomem *ioaddr = tp->mmio_addr;
820 u32 mc_filter[2]; /* Multicast hash filter */
823 if (dev->flags & IFF_PROMISC) {
824 /* Unconditionally log net taps. */
825 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
828 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
830 mc_filter[1] = mc_filter[0] = 0xffffffff;
831 } else if ((dev->mc_count > multicast_filter_limit) ||
832 (dev->flags & IFF_ALLMULTI)) {
833 /* Too many to filter perfectly -- accept all multicasts. */
834 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
835 mc_filter[1] = mc_filter[0] = 0xffffffff;
837 struct dev_mc_list *mclist;
840 rx_mode = AcceptBroadcast | AcceptMyPhys;
841 mc_filter[1] = mc_filter[0] = 0;
842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
843 i++, mclist = mclist->next) {
845 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
847 rx_mode |= AcceptMulticast;
851 spin_lock_irqsave(&tp->lock, flags);
853 SIS_W16(RxMacControl, rx_mode | 0x2);
854 SIS_W32(RxHashTable, mc_filter[0]);
855 SIS_W32(RxHashTable + 4, mc_filter[1]);
857 spin_unlock_irqrestore(&tp->lock, flags);
860 static void sis190_soft_reset(void __iomem *ioaddr)
862 SIS_W32(IntrControl, 0x8000);
865 SIS_W32(IntrControl, 0x0);
866 sis190_asic_down(ioaddr);
870 static void sis190_hw_start(struct net_device *dev)
872 struct sis190_private *tp = netdev_priv(dev);
873 void __iomem *ioaddr = tp->mmio_addr;
875 sis190_soft_reset(ioaddr);
877 SIS_W32(TxDescStartAddr, tp->tx_dma);
878 SIS_W32(RxDescStartAddr, tp->rx_dma);
880 SIS_W32(IntrStatus, 0xffffffff);
881 SIS_W32(IntrMask, 0x0);
882 SIS_W32(GMIIControl, 0x0);
883 SIS_W32(TxMacControl, 0x60);
884 SIS_W16(RxMacControl, 0x02);
885 SIS_W32(RxHashTable, 0x0);
887 SIS_W32(RxWolCtrl, 0x0);
888 SIS_W32(RxWolData, 0x0);
892 sis190_set_rx_mode(dev);
894 /* Enable all known interrupts by setting the interrupt mask. */
895 SIS_W32(IntrMask, sis190_intr_mask);
897 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
898 SIS_W32(RxControl, 0x1a1d);
900 netif_start_queue(dev);
903 static void sis190_phy_task(void * data)
905 struct net_device *dev = data;
906 struct sis190_private *tp = netdev_priv(dev);
907 void __iomem *ioaddr = tp->mmio_addr;
908 int phy_id = tp->mii_if.phy_id;
913 val = mdio_read(ioaddr, phy_id, MII_BMCR);
914 if (val & BMCR_RESET) {
915 // FIXME: needlessly high ? -- FR 02/07/2005
916 mod_timer(&tp->timer, jiffies + HZ/10);
917 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
918 BMSR_ANEGCOMPLETE)) {
919 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
921 netif_carrier_off(dev);
922 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
923 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
931 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
932 "1000 Mbps Full Duplex" },
933 { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
934 "1000 Mbps Half Duplex" },
935 { LPA_100FULL, 0x04000800 | 0x00001000,
936 "100 Mbps Full Duplex" },
937 { LPA_100HALF, 0x04000800,
938 "100 Mbps Half Duplex" },
939 { LPA_10FULL, 0x04000400 | 0x00001000,
940 "10 Mbps Full Duplex" },
941 { LPA_10HALF, 0x04000400,
942 "10 Mbps Half Duplex" },
943 { 0, 0x04000400, "unknown" }
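/*
 * The reg31[] table above maps MII link-partner ability bits to a matching
 * StationControl value and a log string; the loop below stops at the first
 * entry whose bits are all present in LPA, and the final zero-keyed
 * "unknown" entry acts as the catch-all default.
 */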
947 val = mdio_read(ioaddr, phy_id, 0x1f);
948 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
950 val = mdio_read(ioaddr, phy_id, MII_LPA);
951 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
952 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
953 dev->name, val, adv);
957 for (p = reg31; p->val; p++) {
958 if ((val & p->val) == p->val)
962 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
964 if ((tp->features & F_HAS_RGMII) &&
965 (tp->features & F_PHY_BCM5461)) {
966 // Set Tx Delay in RGMII mode.
967 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
969 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
970 p->ctl |= 0x03000000;
973 SIS_W32(StationControl, p->ctl);
975 if (tp->features & F_HAS_RGMII) {
976 SIS_W32(RGDelay, 0x0441);
977 SIS_W32(RGDelay, 0x0440);
980 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
982 netif_carrier_on(dev);
988 static void sis190_phy_timer(unsigned long __opaque)
990 struct net_device *dev = (struct net_device *)__opaque;
991 struct sis190_private *tp = netdev_priv(dev);
993 if (likely(netif_running(dev)))
994 schedule_work(&tp->phy_task);
997 static inline void sis190_delete_timer(struct net_device *dev)
999 struct sis190_private *tp = netdev_priv(dev);
1001 del_timer_sync(&tp->timer);
1004 static inline void sis190_request_timer(struct net_device *dev)
1006 struct sis190_private *tp = netdev_priv(dev);
1007 struct timer_list *timer = &tp->timer;
1010 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1011 timer->data = (unsigned long)dev;
1012 timer->function = sis190_phy_timer;
1016 static void sis190_set_rxbufsize(struct sis190_private *tp,
1017 struct net_device *dev)
1019 unsigned int mtu = dev->mtu;
1021 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1022 /* RxDesc->size has a licence to kill the lower bits */
1023 if (tp->rx_buf_sz & 0x07) {
1025 tp->rx_buf_sz &= RX_BUF_MASK;
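/*
 * The descriptor size field ignores the low three bits (RX_BUF_MASK is
 * 0xfff8), so the receive buffer size is constrained to a multiple of
 * eight bytes here.
 */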
1029 static int sis190_open(struct net_device *dev)
1031 struct sis190_private *tp = netdev_priv(dev);
1032 struct pci_dev *pdev = tp->pci_dev;
1035 sis190_set_rxbufsize(tp, dev);
1038 * Rx and Tx descriptors need 256 bytes alignment.
1039 * pci_alloc_consistent() guarantees a stronger alignment.
1041 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1042 if (!tp->TxDescRing)
1045 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1046 if (!tp->RxDescRing)
1049 rc = sis190_init_ring(dev);
1053 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1055 sis190_request_timer(dev);
1057 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
1059 goto err_release_timer_2;
1061 sis190_hw_start(dev);
1065 err_release_timer_2:
1066 sis190_delete_timer(dev);
1067 sis190_rx_clear(tp);
1069 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1072 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1077 static void sis190_tx_clear(struct sis190_private *tp)
1081 for (i = 0; i < NUM_TX_DESC; i++) {
1082 struct sk_buff *skb = tp->Tx_skbuff[i];
1087 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1088 tp->Tx_skbuff[i] = NULL;
1091 tp->stats.tx_dropped++;
1093 tp->cur_tx = tp->dirty_tx = 0;
1096 static void sis190_down(struct net_device *dev)
1098 struct sis190_private *tp = netdev_priv(dev);
1099 void __iomem *ioaddr = tp->mmio_addr;
1100 unsigned int poll_locked = 0;
1102 sis190_delete_timer(dev);
1104 netif_stop_queue(dev);
1106 flush_scheduled_work();
1109 spin_lock_irq(&tp->lock);
1111 sis190_asic_down(ioaddr);
1113 spin_unlock_irq(&tp->lock);
1115 synchronize_irq(dev->irq);
1118 netif_poll_disable(dev);
1122 synchronize_sched();
1124 } while (SIS_R32(IntrMask));
1126 sis190_tx_clear(tp);
1127 sis190_rx_clear(tp);
1130 static int sis190_close(struct net_device *dev)
1132 struct sis190_private *tp = netdev_priv(dev);
1133 struct pci_dev *pdev = tp->pci_dev;
1137 free_irq(dev->irq, dev);
1139 netif_poll_enable(dev);
1141 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1142 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1144 tp->TxDescRing = NULL;
1145 tp->RxDescRing = NULL;
1150 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1152 struct sis190_private *tp = netdev_priv(dev);
1153 void __iomem *ioaddr = tp->mmio_addr;
1154 u32 len, entry, dirty_tx;
1155 struct TxDesc *desc;
1158 if (unlikely(skb->len < ETH_ZLEN)) {
1159 skb = skb_padto(skb, ETH_ZLEN);
1161 tp->stats.tx_dropped++;
1169 entry = tp->cur_tx % NUM_TX_DESC;
1170 desc = tp->TxDescRing + entry;
1172 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1173 netif_stop_queue(dev);
1174 net_tx_err(tp, KERN_ERR PFX
1175 "%s: BUG! Tx Ring full when queue awake!\n",
1177 return NETDEV_TX_BUSY;
1180 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1182 tp->Tx_skbuff[entry] = skb;
1184 desc->PSize = cpu_to_le32(len);
1185 desc->addr = cpu_to_le32(mapping);
1187 desc->size = cpu_to_le32(len);
1188 if (entry == (NUM_TX_DESC - 1))
1189 desc->size |= cpu_to_le32(RingEnd);
1193 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1199 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1201 dev->trans_start = jiffies;
1203 dirty_tx = tp->dirty_tx;
1204 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1205 netif_stop_queue(dev);
1207 if (dirty_tx != tp->dirty_tx)
1208 netif_wake_queue(dev);
1211 return NETDEV_TX_OK;
1214 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1216 struct sis190_private *tp = netdev_priv(dev);
1221 static void sis190_free_phy(struct list_head *first_phy)
1223 struct sis190_phy *cur, *next;
1225 list_for_each_entry_safe(cur, next, first_phy, list) {
1231 * sis190_default_phy - Select default PHY for sis190 mac.
1232 * @dev: the net device to probe for
1234 * Select the first detected PHY with a link as the default.
1235 * If none has a link, select the PHY whose type is HOME as default.
1236 * If HOME doesn't exist, select LAN.
1238 static u16 sis190_default_phy(struct net_device *dev)
1240 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1241 struct sis190_private *tp = netdev_priv(dev);
1242 struct mii_if_info *mii_if = &tp->mii_if;
1243 void __iomem *ioaddr = tp->mmio_addr;
1246 phy_home = phy_default = phy_lan = NULL;
1248 list_for_each_entry(phy, &tp->first_phy, list) {
1249 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1251 // Link is up, no default PHY selected yet, and not a ghost PHY.
1252 if ((status & BMSR_LSTATUS) &&
1254 (phy->type != UNKNOWN)) {
1257 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1258 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1259 status | BMCR_ANENABLE | BMCR_ISOLATE);
1260 if (phy->type == HOME)
1262 else if (phy->type == LAN)
1269 phy_default = phy_home;
1271 phy_default = phy_lan;
1273 phy_default = list_entry(&tp->first_phy,
1274 struct sis190_phy, list);
1277 if (mii_if->phy_id != phy_default->phy_id) {
1278 mii_if->phy_id = phy_default->phy_id;
1279 net_probe(tp, KERN_INFO
1280 "%s: Using transceiver at address %d as default.\n",
1281 pci_name(tp->pci_dev), mii_if->phy_id);
1284 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1285 status &= (~BMCR_ISOLATE);
1287 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1288 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1293 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1294 struct sis190_phy *phy, unsigned int phy_id,
1297 void __iomem *ioaddr = tp->mmio_addr;
1298 struct mii_chip_info *p;
1300 INIT_LIST_HEAD(&phy->list);
1301 phy->status = mii_status;
1302 phy->phy_id = phy_id;
1304 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1305 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1307 for (p = mii_chip_table; p->type; p++) {
1308 if ((p->id[0] == phy->id[0]) &&
1309 (p->id[1] == (phy->id[1] & 0xfff0))) {
1315 phy->type = (p->type == MIX) ?
1316 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1317 LAN : HOME) : p->type;
1318 tp->features |= p->feature;
1320 phy->type = UNKNOWN;
1322 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1323 pci_name(tp->pci_dev),
1324 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1327 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1329 if (tp->features & F_PHY_88E1111) {
1330 void __iomem *ioaddr = tp->mmio_addr;
1331 int phy_id = tp->mii_if.phy_id;
1337 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1339 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1341 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1347 * sis190_mii_probe - Probe MII PHY for sis190
1348 * @dev: the net device to probe for
1350 * Search the 32 possible MII PHY addresses.
1351 * Identify and set the current PHY if one is found,
1352 * return an error if none is found.
1354 static int __devinit sis190_mii_probe(struct net_device *dev)
1356 struct sis190_private *tp = netdev_priv(dev);
1357 struct mii_if_info *mii_if = &tp->mii_if;
1358 void __iomem *ioaddr = tp->mmio_addr;
1362 INIT_LIST_HEAD(&tp->first_phy);
1364 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1365 struct sis190_phy *phy;
1368 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1370 // Try next mii if the current one is not accessible.
1371 if (status == 0xffff || status == 0x0000)
1374 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1376 sis190_free_phy(&tp->first_phy);
1381 sis190_init_phy(dev, tp, phy, phy_id, status);
1383 list_add(&phy->list, &tp->first_phy);
1386 if (list_empty(&tp->first_phy)) {
1387 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1388 pci_name(tp->pci_dev));
1393 /* Select default PHY for mac */
1394 sis190_default_phy(dev);
1396 sis190_mii_probe_88e1111_fixup(tp);
1399 mii_if->mdio_read = __mdio_read;
1400 mii_if->mdio_write = __mdio_write;
1401 mii_if->phy_id_mask = PHY_ID_ANY;
1402 mii_if->reg_num_mask = MII_REG_ANY;
1407 static void __devexit sis190_mii_remove(struct net_device *dev)
1409 struct sis190_private *tp = netdev_priv(dev);
1411 sis190_free_phy(&tp->first_phy);
1414 static void sis190_release_board(struct pci_dev *pdev)
1416 struct net_device *dev = pci_get_drvdata(pdev);
1417 struct sis190_private *tp = netdev_priv(dev);
1419 iounmap(tp->mmio_addr);
1420 pci_release_regions(pdev);
1421 pci_disable_device(pdev);
1425 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1427 struct sis190_private *tp;
1428 struct net_device *dev;
1429 void __iomem *ioaddr;
1432 dev = alloc_etherdev(sizeof(*tp));
1434 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1439 SET_MODULE_OWNER(dev);
1440 SET_NETDEV_DEV(dev, &pdev->dev);
1442 tp = netdev_priv(dev);
1443 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1445 rc = pci_enable_device(pdev);
1447 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1448 goto err_free_dev_1;
1453 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1454 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1456 goto err_pci_disable_2;
1458 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1459 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1461 goto err_pci_disable_2;
1464 rc = pci_request_regions(pdev, DRV_NAME);
1466 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1468 goto err_pci_disable_2;
1471 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1473 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1475 goto err_free_res_3;
1478 pci_set_master(pdev);
1480 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1482 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1485 goto err_free_res_3;
1489 tp->mmio_addr = ioaddr;
1491 sis190_irq_mask_and_ack(ioaddr);
1493 sis190_soft_reset(ioaddr);
1498 pci_release_regions(pdev);
1500 pci_disable_device(pdev);
1508 static void sis190_tx_timeout(struct net_device *dev)
1510 struct sis190_private *tp = netdev_priv(dev);
1511 void __iomem *ioaddr = tp->mmio_addr;
1514 /* Disable Tx, if not already */
1515 tmp8 = SIS_R8(TxControl);
1516 if (tmp8 & CmdTxEnb)
1517 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1520 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1521 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1523 /* Disable interrupts by clearing the interrupt mask. */
1524 SIS_W32(IntrMask, 0x0000);
1526 /* Stop a shared interrupt from scavenging while we are. */
1527 spin_lock_irq(&tp->lock);
1528 sis190_tx_clear(tp);
1529 spin_unlock_irq(&tp->lock);
1531 /* ...and finally, reset everything. */
1532 sis190_hw_start(dev);
1534 netif_wake_queue(dev);
1537 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1539 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
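/*
 * Bit 7 of the EEPROM info word (or of the APC register on SiS965 boards)
 * appears to select an RGMII-connected PHY; F_HAS_RGMII in turn enables
 * the RGMII delay handling in sis190_phy_task() and the Marvell 88E1111
 * fixup values in sis190_mii_probe_88e1111_fixup().
 */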
1542 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1543 struct net_device *dev)
1545 struct sis190_private *tp = netdev_priv(dev);
1546 void __iomem *ioaddr = tp->mmio_addr;
1550 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1553 /* Check to see if there is a sane EEPROM */
1554 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1556 if ((sig == 0xffff) || (sig == 0x0000)) {
1557 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1558 pci_name(pdev), sig);
1562 /* Get MAC address from EEPROM */
1563 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1564 __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1566 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
1569 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1575 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
1577 * @dev: network device to get address for
1579 * The SiS965 model stores the MAC address in APC CMOS RAM,
1580 * which is accessed through the ISA bridge.
1581 * The MAC address is read into @dev->dev_addr.
1583 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1584 struct net_device *dev)
1586 struct sis190_private *tp = netdev_priv(dev);
1587 struct pci_dev *isa_bridge;
1591 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1594 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1596 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1601 /* Enable port 78h & 79h to access APC Registers. */
1602 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1603 reg = (tmp8 & ~0x02);
1604 pci_write_config_byte(isa_bridge, 0x48, reg);
1606 pci_read_config_byte(isa_bridge, 0x48, &reg);
1608 for (i = 0; i < MAC_ADDR_LEN; i++) {
1609 outb(0x9 + i, 0x78);
1610 dev->dev_addr[i] = inb(0x79);
1616 sis190_set_rgmii(tp, reg);
1618 /* Restore the value to ISA Bridge */
1619 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1620 pci_dev_put(isa_bridge);
1626 * sis190_init_rxfilter - Initialize the Rx filter
1627 * @dev: network device to initialize
1629 * Set receive filter address to our MAC address
1630 * and enable packet filtering.
1632 static inline void sis190_init_rxfilter(struct net_device *dev)
1634 struct sis190_private *tp = netdev_priv(dev);
1635 void __iomem *ioaddr = tp->mmio_addr;
1639 ctl = SIS_R16(RxMacControl);
1641 * Disable packet filtering before setting filter.
1642 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1643 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1645 SIS_W16(RxMacControl, ctl & ~0x0f00);
1647 for (i = 0; i < MAC_ADDR_LEN; i++)
1648 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1650 SIS_W16(RxMacControl, ctl);
1654 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1658 pci_read_config_byte(pdev, 0x73, &from);
1660 return (from & 0x00000001) ?
1661 sis190_get_mac_addr_from_apc(pdev, dev) :
1662 sis190_get_mac_addr_from_eeprom(pdev, dev);
1665 static void sis190_set_speed_auto(struct net_device *dev)
1667 struct sis190_private *tp = netdev_priv(dev);
1668 void __iomem *ioaddr = tp->mmio_addr;
1669 int phy_id = tp->mii_if.phy_id;
1672 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1674 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1676 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1678 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1679 ADVERTISE_100FULL | ADVERTISE_10FULL |
1680 ADVERTISE_100HALF | ADVERTISE_10HALF);
1682 // Enable 1000 Full Mode.
1683 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1685 // Enable auto-negotiation and restart auto-negotiation.
1686 mdio_write(ioaddr, phy_id, MII_BMCR,
1687 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1690 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1692 struct sis190_private *tp = netdev_priv(dev);
1694 return mii_ethtool_gset(&tp->mii_if, cmd);
1697 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1699 struct sis190_private *tp = netdev_priv(dev);
1701 return mii_ethtool_sset(&tp->mii_if, cmd);
1704 static void sis190_get_drvinfo(struct net_device *dev,
1705 struct ethtool_drvinfo *info)
1707 struct sis190_private *tp = netdev_priv(dev);
1709 strcpy(info->driver, DRV_NAME);
1710 strcpy(info->version, DRV_VERSION);
1711 strcpy(info->bus_info, pci_name(tp->pci_dev));
1714 static int sis190_get_regs_len(struct net_device *dev)
1716 return SIS190_REGS_SIZE;
1719 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1722 struct sis190_private *tp = netdev_priv(dev);
1723 unsigned long flags;
1725 if (regs->len > SIS190_REGS_SIZE)
1726 regs->len = SIS190_REGS_SIZE;
1728 spin_lock_irqsave(&tp->lock, flags);
1729 memcpy_fromio(p, tp->mmio_addr, regs->len);
1730 spin_unlock_irqrestore(&tp->lock, flags);
1733 static int sis190_nway_reset(struct net_device *dev)
1735 struct sis190_private *tp = netdev_priv(dev);
1737 return mii_nway_restart(&tp->mii_if);
1740 static u32 sis190_get_msglevel(struct net_device *dev)
1742 struct sis190_private *tp = netdev_priv(dev);
1744 return tp->msg_enable;
1747 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1749 struct sis190_private *tp = netdev_priv(dev);
1751 tp->msg_enable = value;
1754 static struct ethtool_ops sis190_ethtool_ops = {
1755 .get_settings = sis190_get_settings,
1756 .set_settings = sis190_set_settings,
1757 .get_drvinfo = sis190_get_drvinfo,
1758 .get_regs_len = sis190_get_regs_len,
1759 .get_regs = sis190_get_regs,
1760 .get_link = ethtool_op_get_link,
1761 .get_msglevel = sis190_get_msglevel,
1762 .set_msglevel = sis190_set_msglevel,
1763 .nway_reset = sis190_nway_reset,
1766 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1768 struct sis190_private *tp = netdev_priv(dev);
1770 return !netif_running(dev) ? -EINVAL :
1771 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1774 static int __devinit sis190_init_one(struct pci_dev *pdev,
1775 const struct pci_device_id *ent)
1777 static int printed_version = 0;
1778 struct sis190_private *tp;
1779 struct net_device *dev;
1780 void __iomem *ioaddr;
1783 if (!printed_version) {
1784 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1785 printed_version = 1;
1788 dev = sis190_init_board(pdev);
1794 pci_set_drvdata(pdev, dev);
1796 tp = netdev_priv(dev);
1797 ioaddr = tp->mmio_addr;
1799 rc = sis190_get_mac_addr(pdev, dev);
1801 goto err_release_board;
1803 sis190_init_rxfilter(dev);
1805 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1807 dev->open = sis190_open;
1808 dev->stop = sis190_close;
1809 dev->do_ioctl = sis190_ioctl;
1810 dev->get_stats = sis190_get_stats;
1811 dev->tx_timeout = sis190_tx_timeout;
1812 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1813 dev->hard_start_xmit = sis190_start_xmit;
1814 #ifdef CONFIG_NET_POLL_CONTROLLER
1815 dev->poll_controller = sis190_netpoll;
1817 dev->set_multicast_list = sis190_set_rx_mode;
1818 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1819 dev->irq = pdev->irq;
1820 dev->base_addr = (unsigned long) 0xdead;
1822 spin_lock_init(&tp->lock);
1824 rc = sis190_mii_probe(dev);
1826 goto err_release_board;
1828 rc = register_netdev(dev);
1830 goto err_remove_mii;
1832 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1833 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1834 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1836 dev->dev_addr[0], dev->dev_addr[1],
1837 dev->dev_addr[2], dev->dev_addr[3],
1838 dev->dev_addr[4], dev->dev_addr[5]);
1840 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1841 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1843 netif_carrier_off(dev);
1845 sis190_set_speed_auto(dev);
1850 sis190_mii_remove(dev);
1852 sis190_release_board(pdev);
1856 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1858 struct net_device *dev = pci_get_drvdata(pdev);
1860 sis190_mii_remove(dev);
1861 unregister_netdev(dev);
1862 sis190_release_board(pdev);
1863 pci_set_drvdata(pdev, NULL);
1866 static struct pci_driver sis190_pci_driver = {
1868 .id_table = sis190_pci_tbl,
1869 .probe = sis190_init_one,
1870 .remove = __devexit_p(sis190_remove_one),
1873 static int __init sis190_init_module(void)
1875 return pci_module_init(&sis190_pci_driver);
1878 static void __exit sis190_cleanup_module(void)
1880 pci_unregister_driver(&sis190_pci_driver);
1883 module_init(sis190_init_module);
1884 module_exit(sis190_cleanup_module);