/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Xiong Huang <xiong_huang@attansic.com>
 * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
 * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA
 *
 * Chris Snook <csnook@redhat.com>
 * Jay Cliburn <jcliburn@gmail.com>
 *
 * This version is adapted from the Attansic reference driver for
 * inclusion in the Linux kernel. It is currently under heavy development.
 * A very incomplete list of things that need to be dealt with:
 *
 * TODO:
 * Add more ethtool functions.
 * Fix abstruse irq enable/disable condition described here:
 *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
 *
 * NEEDS TESTING:
 * interrupt coalescing
 */
#include <asm/atomic.h>
#include <asm/byteorder.h>

#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/irqflags.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include "atl1.h"

/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"

/*
 * atl1_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id atl1_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);

/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
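	/*
	 * With the default MTU of 1500 this gives max_frame_size =
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522,
	 * and rx_buffer_len is that rounded up to a multiple of 8:
	 * (1522 + 7) & ~7 = 1528.
	 */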
	adapter->ict = 50000;		/* 100ms */
	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->rfd_fetch_gap = 1;
	hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
	hw->rx_jumbo_lkah = 1;
	hw->rrd_ret_timer = 16;

	hw->tpd_fetch_th = 16;
	hw->txf_burst = 0x100;
	hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
	hw->tpd_fetch_gap = 1;
	hw->rcb_value = atl1_rcb_64;
	hw->dma_ord = atl1_dma_ord_enh;
	hw->dmar_block = atl1_dma_req_256;
	hw->dmaw_block = atl1_dma_req_256;

	hw->cmb_rx_timer = 1;	/* about 2us */
	hw->cmb_tx_timer = 1;	/* about 2us */
	hw->smb_timer = 100000;	/* about 200ms */

	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->mb_lock);

	return 0;
}
static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	u16 result;

	atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);

	return result;
}

static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
	int val)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_write_phy_reg(&adapter->hw, reg_num, val);
}

static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	int retval;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->lock, flags);
	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
	spin_unlock_irqrestore(&adapter->lock, flags);

	return retval;
}

/*
 * atl1_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;
	struct pci_dev *pdev = adapter->pdev;
	int size;
	u8 offset = 0;

	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
	if (unlikely(!tpd_ring->buffer_info)) {
		dev_err(&pdev->dev, "kzalloc failed, size = %d\n", size);
		goto err_nomem;
	}
	rfd_ring->buffer_info =
		(struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);

	/*
	 * real ring DMA buffer
	 * each ring/block may need up to 8 bytes for alignment, hence the
	 * additional 40 bytes tacked onto the end.
	 */
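	/*
	 * Five blocks share this single allocation: the TPD, RFD and RRD
	 * rings plus the CMB and SMB. Each block is pushed up to the next
	 * 8-byte boundary below, so up to 5 * 8 = 40 bytes of the slack
	 * can be consumed by realignment.
	 */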
	ring_header->size = size =
		sizeof(struct tx_packet_desc) * tpd_ring->count
		+ sizeof(struct rx_free_desc) * rfd_ring->count
		+ sizeof(struct rx_return_desc) * rrd_ring->count
		+ sizeof(struct coals_msg_block)
		+ sizeof(struct stats_msg_block)
		+ 40;

	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
		&ring_header->dma);
	if (unlikely(!ring_header->desc)) {
		dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
		goto err_nomem;
	}

	memset(ring_header->desc, 0, ring_header->size);

	/* init TPD ring */
	tpd_ring->dma = ring_header->dma;
	offset = (tpd_ring->dma & 0x7) ? (8 - (tpd_ring->dma & 0x7)) : 0;
	tpd_ring->dma += offset;
	tpd_ring->desc = (u8 *) ring_header->desc + offset;
	tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

	/* init RFD ring */
	rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
	offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
	rfd_ring->dma += offset;
	rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
	rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;

	/* init RRD ring */
	rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
	offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
	rrd_ring->dma += offset;
	rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
	rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;

	/* init CMB */
	adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
	offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
	adapter->cmb.dma += offset;
	adapter->cmb.cmb = (struct coals_msg_block *)
		((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

	/* init SMB */
	adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
	offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
	adapter->smb.dma += offset;
	adapter->smb.smb = (struct stats_msg_block *)
		((u8 *) adapter->cmb.cmb +
		(sizeof(struct coals_msg_block) + offset));

	return 0;

err_nomem:
	kfree(tpd_ring->buffer_info);
	return -ENOMEM;
}
static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rfd_ring->count; i++) {
		buffer_info = &rfd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * rfd_ring->count;
	memset(rfd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rfd_ring->desc, 0, rfd_ring->size);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 */
static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}
	}

	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->skb) {
			dev_kfree_skb_any(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * tpd_ring->count;
	memset(tpd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tpd_ring->desc, 0, tpd_ring->size);

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);
}

/*
 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);

	kfree(tpd_ring->buffer_info);
	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
		ring_header->dma);

	tpd_ring->buffer_info = NULL;
	tpd_ring->desc = NULL;
	tpd_ring->dma = 0;

	rfd_ring->buffer_info = NULL;
	rfd_ring->desc = NULL;
	rfd_ring->dma = 0;

	rrd_ring->desc = NULL;
	rrd_ring->dma = 0;
}
static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
	u32 value;
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
	/* duplex */
	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;
	/* speed */
	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
		MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
		MAC_CTRL_SPEED_SHIFT);
	/* flow control */
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
	/* PAD & CRC */
	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	/* preamble length */
	value |= (((u32) adapter->hw.preamble_len
		& MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
	/* vlan */
	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;
	/* rx checksum */
	if (adapter->rx_csum)
		value |= MAC_CTRL_RX_CHKSUM_EN;
	/* filter mode */
	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;
	/* value |= MAC_CTRL_LOOPBACK; */
	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must read twice */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {
		/* link down */
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* link result is our setting */
	if (!reconfig) {
		if (adapter->link_speed != speed
		    || adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			dev_info(&adapter->pdev->dev,
				"%s link is up %d Mbps %s\n",
				netdev->name, adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
				MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 |
				MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return 0;
	}

	/* auto-neg, insert timer to re-config phy */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
	}

	return 0;
}

/*
 * atl1_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int old_mtu = netdev->mtu;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
		return -EINVAL;
	}

	adapter->hw.max_frame_size = max_frame;
	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
	adapter->rx_buffer_len = (max_frame + 7) & ~7;
	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
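	/*
	 * For example, new_mtu = 9000 gives max_frame = 9022,
	 * tx_jumbo_task_th = (9022 + 7) >> 3 = 1128 (8-byte units),
	 * rx_buffer_len = 9024 and rx_jumbo_th = 9024 / 8 = 1128.
	 */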
	netdev->mtu = new_mtu;
	if ((old_mtu != new_mtu) && netif_running(netdev)) {
		atl1_down(adapter);
		atl1_up(adapter);
	}

	return 0;
}
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
	u32 hi, lo, value;

	/* RFD Flow Control */
	value = adapter->rfd_ring.count;
	hi = value / 16;
	if (hi < 2)
		hi = 2;
	lo = value * 7 / 8;

	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = adapter->rrd_ring.count;
	lo = value / 16;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
static void set_flow_ctrl_new(struct atl1_hw *hw)
{
	u32 hi, lo, value;

	/* RXF Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
	lo = value / 16;
	if (lo < 192)
		lo = 192;
	hi = value * 7 / 8;
	if (hi < lo)
		hi = lo + 16;
	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
	lo = value / 8;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	if (hi < lo)
		hi = lo + 3;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

/*
 * atl1_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx/Rx unit of the MAC after a reset.
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	u32 value;

	/* clear interrupt status */
	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

	/* set MAC Address */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) |
		(((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

	/* tx / rx ring */

	/* HI base address */
	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
	/* LO base address */
	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_SMB_ADDR_LO);

	/* element count */
	value = adapter->rrd_ring.count;
	value <<= 16;
	value += adapter->rfd_ring.count;
	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
		REG_DESC_TPD_RING_SIZE);

	/* Load Ptr */
	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

	/* config Mailbox */
	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
		& MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
		((atomic_read(&adapter->rrd_ring.next_to_clean)
		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		((atomic_read(&adapter->rfd_ring.next_to_use)
		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAILBOX);

	/* config IPG/IFG */
	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
		<< MAC_IPG_IFG_IPGT_SHIFT) |
		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
		<< MAC_IPG_IFG_MIFG_SHIFT) |
		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
		<< MAC_IPG_IFG_IPGR1_SHIFT) |
		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
		<< MAC_IPG_IFG_IPGR2_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

	/* config Half-Duplex Control */
	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

	/* set Interrupt Moderator Timer */
	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

	/* set Interrupt Clear Timer */
	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

	/* set max frame size hw will accept */
	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

	/* jumbo size & rrd retirement timer */
	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
		<< RXQ_JMBOSZ_TH_SHIFT) |
		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
		<< RXQ_JMBO_LKAH_SHIFT) |
		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
		<< RXQ_RRD_TIMER_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
	/* flow control */
	switch (hw->dev_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		set_flow_ctrl_old(adapter);
		break;
	default:
		set_flow_ctrl_new(hw);
		break;
	}

	/* config TXQ */
	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
		<< TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
		TXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

	/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
		<< TX_JUMBO_TASK_TH_SHIFT) |
		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
		<< TX_TPD_MIN_IPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

	/* config RXQ */
	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
		RXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

	/* config DMA Engine */
	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
		DMA_CTRL_DMAW_EN;
	value |= (u32) hw->dma_ord;
	if (atl1_rcb_128 == hw->rcb_value)
		value |= DMA_CTRL_RCB_VALUE;
	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

	/* config CMB / SMB */
	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
		hw->cmb_tpd : adapter->tpd_ring.count;
	value <<= 16;
	value |= hw->cmb_rrd;
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

	/* enable CMB / SMB */
	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

	value = ioread32(adapter->hw.hw_addr + REG_ISR);
	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
		value = 1;	/* config failed */
	else
		value = 0;

	/* clear all interrupt status */
	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
	iowrite32(0, adapter->hw.hw_addr + REG_ISR);

	return value;
}

/*
 * atl1_pcie_patch - Patch for PCIE module
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
	u32 value;

	/* much vendor magic here */
	value = 0x6500;
	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
	/* pcie flow control mode change */
	value = ioread32(adapter->hw.hw_addr + 0x1008);
	value |= 0x8000;
	iowrite32(value, adapter->hw.hw_addr + 0x1008);
}

/*
 * After ACPI resume on some VIA motherboards, the Interrupt Disable bit
 * (0x400) in the PCI Command register is left set, which blocks INTx
 * interrupts. This function clears that bit so interrupts work again.
 * Brackett, 2006/03/15
 */
static void atl1_via_workaround(struct atl1_adapter *adapter)
{
	unsigned long value;

	value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
	if (value & PCI_COMMAND_INTX_DISABLE)
		value &= ~PCI_COMMAND_INTX_DISABLE;
	iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
}
static void atl1_inc_smb(struct atl1_adapter *adapter)
{
	struct stats_msg_block *smb = adapter->smb.smb;

	/* Fill out the OS statistics structure */
	adapter->soft_stats.rx_packets += smb->rx_ok;
	adapter->soft_stats.tx_packets += smb->tx_ok;
	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
	adapter->soft_stats.multicast += smb->rx_mcast;
	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

	/* Rx Errors */
	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
		smb->rx_rrd_ov + smb->rx_align_err);
	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
		smb->rx_rxf_ov);

	adapter->soft_stats.rx_pause += smb->rx_pause;
	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

	/* Tx Errors */
	adapter->soft_stats.tx_errors += (smb->tx_late_col +
		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
	adapter->soft_stats.tx_window_errors += smb->tx_late_col;

	adapter->soft_stats.excecol += smb->tx_abort_col;
	adapter->soft_stats.deffer += smb->tx_defer;
	adapter->soft_stats.scc += smb->tx_1_col;
	adapter->soft_stats.mcc += smb->tx_2_col;
	adapter->soft_stats.latecol += smb->tx_late_col;
	adapter->soft_stats.tx_underun += smb->tx_underrun;
	adapter->soft_stats.tx_trunc += smb->tx_trunc;
	adapter->soft_stats.tx_pause += smb->tx_pause;

	adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
	adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
	adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
	adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
	adapter->net_stats.multicast = adapter->soft_stats.multicast;
	adapter->net_stats.collisions = adapter->soft_stats.collisions;
	adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
	adapter->net_stats.rx_over_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.rx_length_errors =
		adapter->soft_stats.rx_length_errors;
	adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
	adapter->net_stats.rx_frame_errors =
		adapter->soft_stats.rx_frame_errors;
	adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
	adapter->net_stats.rx_missed_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
	adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
	adapter->net_stats.tx_aborted_errors =
		adapter->soft_stats.tx_aborted_errors;
	adapter->net_stats.tx_window_errors =
		adapter->soft_stats.tx_window_errors;
	adapter->net_stats.tx_carrier_errors =
		adapter->soft_stats.tx_carrier_errors;
}
static void atl1_update_mailbox(struct atl1_adapter *adapter)
{
	unsigned long flags;
	u32 tpd_next_to_use;
	u32 rfd_next_to_use;
	u32 rrd_next_to_clean;
	u32 value;

	spin_lock_irqsave(&adapter->mb_lock, flags);

	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
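	/*
	 * All three ring indices are posted to the hardware in a single
	 * 32-bit mailbox write: the RFD producer, RRD consumer and TPD
	 * producer indices are each masked and shifted into their own
	 * bit field, so the device sees one consistent snapshot of all
	 * three under mb_lock.
	 */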
	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
		MB_RFD_PROD_INDX_SHIFT) |
		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
		MB_RRD_CONS_INDX_SHIFT) |
		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
		MB_TPD_PROD_INDX_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);

	spin_unlock_irqrestore(&adapter->mb_lock, flags);
}
static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, u16 offset)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;

	while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
		rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
		if (++rfd_ring->next_to_clean == rfd_ring->count) {
			rfd_ring->next_to_clean = 0;
		}
	}
}
static void atl1_update_rfd_index(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd)
{
	u16 num_buf;

	num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
		adapter->rx_buffer_len;
	if (rrd->num_buf == num_buf)
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, num_buf);
}
static void atl1_rx_checksum(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, struct sk_buff *skb)
{
	struct pci_dev *pdev = adapter->pdev;

	skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
					ERR_FLAG_CODE | ERR_FLAG_OV)) {
			adapter->hw_csum_err++;
			dev_printk(KERN_DEBUG, &pdev->dev,
				"rx checksum error\n");
			return;
		}
	}

	/* not IPv4 */
	if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
		/* checksum is invalid, but it's not an IPv4 pkt, so ok */
		return;

	/* IPv4 packet */
	if (likely(!(rrd->err_flg &
		(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_good++;
		return;
	}

	/* IPv4, but hardware thinks its checksum is wrong */
	dev_printk(KERN_DEBUG, &pdev->dev,
		"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
		rrd->pkt_flg, rrd->err_flg);
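	/*
	 * Pass the packet up anyway with CHECKSUM_COMPLETE: skb->csum
	 * carries the checksum the hardware computed, so the stack can
	 * verify it itself rather than the driver dropping the packet
	 * on a possibly spurious hardware error.
	 */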
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
	adapter->hw_csum_err++;
	return;
}

/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct page *page;
	unsigned long offset;
	struct atl1_buffer *buffer_info, *next_info;
	struct sk_buff *skb;
	u16 num_alloc = 0;
	u16 rfd_next_to_use, next_next;
	struct rx_free_desc *rfd_desc;

	next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
	if (++next_next == rfd_ring->count)
		next_next = 0;
	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
	next_info = &rfd_ring->buffer_info[next_next];

	while (!buffer_info->alloced && !next_info->alloced) {
		if (buffer_info->skb) {
			buffer_info->alloced = 1;
			goto next;
		}

		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->net_stats.rx_dropped++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->alloced = 1;
		buffer_info->skb = skb;
		buffer_info->length = (u16) adapter->rx_buffer_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(pdev, page, offset,
			adapter->rx_buffer_len, PCI_DMA_FROMDEVICE);
		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
		rfd_desc->coalese = 0;

next:
		rfd_next_to_use = next_next;
		if (unlikely(++next_next == rfd_ring->count))
			next_next = 0;

		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
		next_info = &rfd_ring->buffer_info[next_next];
		num_alloc++;
	}

	if (num_alloc) {
		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64.)
		 */
		wmb();
		atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
	}
	return num_alloc;
}
static void atl1_intr_rx(struct atl1_adapter *adapter)
{
	int i, count;
	u16 length;
	u16 rrd_next_to_clean;
	u32 value;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct rx_return_desc *rrd;
	struct sk_buff *skb;

	count = 0;

	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

	while (1) {
		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
		i = 1;
		if (likely(rrd->xsz.valid)) {	/* packet valid */
chk_rrd:
			/* check rrd status */
			if (likely(rrd->num_buf == 1))
				goto rrd_ok;

			/* rrd seems to be bad */
			if (unlikely(i-- > 0)) {
				/* rrd may not be DMAed completely */
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"incomplete RRD DMA transfer\n");
				udelay(1);
				goto chk_rrd;
			}
			/* bad rrd */
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"bad RRD\n");
			/* see if update RFD index */
			if (rrd->num_buf > 1)
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {	/* current rrd not yet updated */

			break;
		}
rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
				| ERR_FLAG_LEN))) {
				/* packet error, don't pass upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			buffer_info->length, PCI_DMA_FROMDEVICE);
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		skb_put(skb, length - ETH_FCS_LEN);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
				((rrd->vlan_tag & 7) << 13) |
				((rrd->vlan_tag & 8) << 9);
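			/*
			 * The RRD holds the 802.1Q TCI in a rotated layout:
			 * VID in bits 15:4, CFI in bit 3 and priority in
			 * bits 2:0. The shifts above rebuild the standard
			 * TCI; atl1_xmit_frame applies the inverse rotation
			 * on transmit.
			 */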
			vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
		} else
			netif_rx(skb);

		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;

		adapter->netdev->last_rx = jiffies;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	atl1_alloc_rx_buffers(adapter);

	/* update mailbox ? */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;

		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use =
			atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
			atomic_read(&adapter->rrd_ring.next_to_clean);
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}
}
static void atl1_intr_tx(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 sw_tpd_next_to_clean;
	u16 cmb_tpd_next_to_clean;

	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
		struct tx_packet_desc *tpd;

		tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
		if (buffer_info->dma) {
			pci_unmap_page(adapter->pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb_irq(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (++sw_tpd_next_to_clean == tpd_ring->count)
			sw_tpd_next_to_clean = 0;
	}
	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

	if (netif_queue_stopped(adapter->netdev)
	    && netif_carrier_ok(adapter->netdev))
		netif_wake_queue(adapter->netdev);
}
static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
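	/*
	 * One descriptor is always left unused so that a completely full
	 * ring cannot be confused with an empty one. For example, with
	 * count = 256, next_to_clean = 10, next_to_use = 14:
	 * 256 + 10 - 14 - 1 = 251 descriptors are still available.
	 */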
	return ((next_to_clean > next_to_use) ?
		next_to_clean - next_to_use - 1 :
		tpd_ring->count + next_to_clean - next_to_use - 1);
}
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 hdr_len, ip_off;
	u32 real_len;
	int err;

	if (skb_shinfo(skb)->gso_size) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(err))
				return -1;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			real_len = (((unsigned char *)iph - skb->data) +
				ntohs(iph->tot_len));
			if (real_len < skb->len)
				pskb_trim(skb, real_len);
			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
			if (skb->len == hdr_len) {
				iph->check = 0;
				tcp_hdr(skb)->check =
					~csum_tcpudp_magic(iph->saddr,
					iph->daddr, tcp_hdrlen(skb),
					IPPROTO_TCP, 0);
				ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
					TPD_IPHL_SHIFT;
				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
					TPD_TCPHDRLEN_MASK) <<
					TPD_TCPHDRLEN_SHIFT;
				ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
				ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
				return 1;
			}

			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
					iph->daddr, 0, IPPROTO_TCP, 0);
			ip_off = (unsigned char *)iph -
				(unsigned char *) skb_network_header(skb);
			if (ip_off == 8) /* 802.3-SNAP frame */
				ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
			else if (ip_off != 0)
				return -2;

			ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
				TPD_IPHL_SHIFT;
			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
				TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
				TPD_MSS_MASK) << TPD_MSS_SHIFT;
			ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
			return 3;
		}
	}
	return 0;
}
static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		cso = skb_transport_offset(skb);
		css = cso + skb->csum_offset;
		if (unlikely(cso & 0x1)) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"payload offset not an even number\n");
			return -1;
		}
		ptpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
			TPD_PLOADOFFSET_SHIFT;
		ptpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
			TPD_CCSUMOFFSET_SHIFT;
		ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
		return true;
	}
	return 0;
}
static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 buf_len = skb->len;
	struct page *page;
	unsigned long offset;
	unsigned int nr_frags;
	unsigned int f;
	int retval;
	u16 next_to_use;
	u16 data_len;
	u8 hdr_len;

	buf_len -= skb->data_len;
	nr_frags = skb_shinfo(skb)->nr_frags;
	next_to_use = atomic_read(&tpd_ring->next_to_use);
	buffer_info = &tpd_ring->buffer_info[next_to_use];
	if (unlikely(buffer_info->skb))
		BUG();
	/* put skb in last TPD */
	buffer_info->skb = NULL;

	retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
	if (retval) {
		/* TSO */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		buffer_info->length = hdr_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, hdr_len, PCI_DMA_TODEVICE);

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;

		if (buf_len > hdr_len) {
			int i, nseg;

			data_len = buf_len - hdr_len;
			nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
			for (i = 0; i < nseg; i++) {
				buffer_info =
					&tpd_ring->buffer_info[next_to_use];
				buffer_info->skb = NULL;
				buffer_info->length =
					(ATL1_MAX_TX_BUF_LEN >=
					data_len) ? ATL1_MAX_TX_BUF_LEN :
					data_len;
				data_len -= buffer_info->length;
				page = virt_to_page(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
				offset = (unsigned long)(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
					~PAGE_MASK;
				buffer_info->dma = pci_map_page(adapter->pdev,
					page, offset, buffer_info->length,
					PCI_DMA_TODEVICE);
				if (++next_to_use == tpd_ring->count)
					next_to_use = 0;
			}
		}
	} else {
		/* not TSO */
		buffer_info->length = buf_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, buf_len, PCI_DMA_TODEVICE);
		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;
		u16 i, nseg;

		frag = &skb_shinfo(skb)->frags[f];
		buf_len = frag->size;

		nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
			ATL1_MAX_TX_BUF_LEN;
		for (i = 0; i < nseg; i++) {
			buffer_info = &tpd_ring->buffer_info[next_to_use];
			if (unlikely(buffer_info->skb))
				BUG();
			buffer_info->skb = NULL;
			buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
				ATL1_MAX_TX_BUF_LEN : buf_len;
			buf_len -= buffer_info->length;
			buffer_info->dma = pci_map_page(adapter->pdev,
				frag->page,
				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
				buffer_info->length, PCI_DMA_TODEVICE);

			if (++next_to_use == tpd_ring->count)
				next_to_use = 0;
		}
	}

	/* last tpd's buffer-info */
	buffer_info->skb = skb;
}
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
	struct tx_packet_desc *ptpd)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	int j;
	u32 val;
	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
		if (tpd != ptpd)
			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		tpd->word2 = (cpu_to_le16(buffer_info->length) &
			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;

		/*
		 * if this is the first packet in a TSO chain, set
		 * TPD_HDRFLAG, otherwise, clear it.
		 */
		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
			TPD_SEGMENT_EN_MASK;
		if (val) {
			if (!j)
				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
			else
				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
		}

		if (j == (count - 1))
			tpd->word3 |= 1 << TPD_EOP_SHIFT;

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64.)
	 */
	wmb();

	atomic_set(&tpd_ring->next_to_use, next_to_use);
}
static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	int len = skb->len;
	int tso;
	int count = 1;
	int ret_val;
	struct tx_packet_desc *ptpd;
	u16 frag_size;
	u16 vlan_tag;
	unsigned long flags;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	unsigned int f;
	unsigned int proto_hdr_len;

	len -= skb->data_len;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
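	/*
	 * Count how many TPDs the frame needs: one per
	 * ATL1_MAX_TX_BUF_LEN-sized chunk of each fragment, rounding the
	 * division up with the usual (n + d - 1) / d idiom.
	 */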
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_shinfo(skb)->frags[f].size;
		if (frag_size)
			count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		if (skb->protocol == htons(ETH_P_IP)) {
			proto_hdr_len = (skb_transport_offset(skb) +
				tcp_hdrlen(skb));
			if (unlikely(proto_hdr_len > len)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			/* need additional TPD ? */
			if (proto_hdr_len != len)
				count += (len - proto_hdr_len +
					ATL1_MAX_TX_BUF_LEN - 1) /
					ATL1_MAX_TX_BUF_LEN;
		}
	}

	if (!spin_trylock_irqsave(&adapter->lock, flags)) {
		/* Can't get lock - tell upper layer to requeue */
		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
		return NETDEV_TX_LOCKED;
	}

	if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
		/* not enough descriptors */
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->lock, flags);
		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
		return NETDEV_TX_BUSY;
	}

	ptpd = ATL1_TPD_DESC(tpd_ring,
		(u16) atomic_read(&tpd_ring->next_to_use));
	memset(ptpd, 0, sizeof(struct tx_packet_desc));
	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
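		/*
		 * Rotate the TCI into the hardware layout: VID[11:0] moves
		 * up to bits 15:4, priority bits 15:13 drop to bits 2:0 and
		 * CFI (bit 12) lands in bit 3; the receive path in
		 * atl1_intr_rx undoes this rotation.
		 */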
		ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
		ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) <<
			TPD_VL_TAGGED_SHIFT;
	}

	tso = atl1_tso(adapter, skb, ptpd);
	if (tso < 0) {
		spin_unlock_irqrestore(&adapter->lock, flags);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!tso) {
		ret_val = atl1_tx_csum(adapter, skb, ptpd);
		if (ret_val < 0) {
			spin_unlock_irqrestore(&adapter->lock, flags);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	atl1_tx_map(adapter, skb, ptpd);
	atl1_tx_queue(adapter, count, ptpd);
	atl1_update_mailbox(adapter);
	spin_unlock_irqrestore(&adapter->lock, flags);
	netdev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
	struct atl1_adapter *adapter = netdev_priv(data);
	u32 status;
	int max_ints = 10;

	status = adapter->cmb.cmb->int_stats;
	if (!status)
		return IRQ_NONE;

	do {
		/* clear CMB interrupt status at once */
		adapter->cmb.cmb->int_stats = 0;

		if (status & ISR_GPHY)	/* clear phy status */
			atlx_clear_phy_int(adapter);

		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

		/* check if SMB intr */
		if (status & ISR_SMB)
			atl1_inc_smb(adapter);

		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"pcie phy link down %x\n", status);
			if (netif_running(adapter->netdev)) {	/* reset MAC */
				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
				schedule_work(&adapter->pcie_dma_to_rst_task);
				return IRQ_HANDLED;
			}
		}

		/* check if DMA read/write error ? */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"pcie DMA r/w error (status = 0x%x)\n",
				status);
			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
			schedule_work(&adapter->pcie_dma_to_rst_task);
			return IRQ_HANDLED;
		}

		/* link event */
		if (status & ISR_GPHY) {
			adapter->soft_stats.tx_carrier_errors++;
			atl1_check_for_link(adapter);
		}

		/* transmit event */
		if (status & ISR_CMB_TX)
			atl1_intr_tx(adapter);

		/* rx exception */
		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
				ISR_HOST_RRD_OV))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"rx exception, ISR = 0x%x\n", status);
			atl1_intr_rx(adapter);
		}

		if (--max_ints < 0)
			break;

	} while ((status = adapter->cmb.cmb->int_stats));
	/* re-enable Interrupt */
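	/*
	 * Writing REG_ISR with ISR_DIS_INT clear re-arms the interrupt;
	 * ISR_DIS_SMB and ISR_DIS_DMA appear to keep the SMB and DMA
	 * timer sources masked, mirroring the disable write (with
	 * ISR_DIS_INT set) at the top of the loop.
	 */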
	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
	return IRQ_HANDLED;
}

/*
 * atl1_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 */
static void atl1_watchdog(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

/*
 * atl1_phy_config - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 */
static void atl1_phy_config(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
	struct atl1_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->lock, flags);
	adapter->phy_timer_pending = false;
	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
	spin_unlock_irqrestore(&adapter->lock, flags);
}

/*
 * Orphaned vendor comment left intact here:
 * <vendor comment>
 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
 * will assert. We do soft reset <0x1400=1> according
 * with the SPEC. BUT, it seemes that PCIE or DMA
 * state-machine will not be reset. DMAR_TO_INT will
 * assert again and again.
 * </vendor comment>
 */
static void atl1_tx_timeout_task(struct work_struct *work)
{
	struct atl1_adapter *adapter =
		container_of(work, struct atl1_adapter, tx_timeout_task);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	atl1_down(adapter);
	atl1_up(adapter);
	netif_device_attach(netdev);
}
int atl1_reset(struct atl1_adapter *adapter)
{
	int ret;

	ret = atl1_reset_hw(&adapter->hw);
	if (ret)
		return ret;
	return atl1_init_hw(&adapter->hw);
}
s32 atl1_up(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int irq_flags = IRQF_SAMPLE_RANDOM;

	/* hardware has been reset, we need to reload some things */
	atlx_set_multi(netdev);
	atl1_init_ring_ptrs(adapter);
	atlx_restore_vlan(adapter);
	err = atl1_alloc_rx_buffers(adapter);
	if (unlikely(!err))
		/* no RX BUFFER allocated */
		return -ENOMEM;

	if (unlikely(atl1_configure(adapter))) {
		err = -EIO;
		goto err_up;
	}

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		dev_info(&adapter->pdev->dev,
			"Unable to enable MSI: %d\n", err);
		irq_flags |= IRQF_SHARED;
	}

	err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
		netdev->name, netdev);
	if (unlikely(err))
		goto err_up;

	mod_timer(&adapter->watchdog_timer, jiffies);
	atlx_irq_enable(adapter);
	atl1_check_link(adapter);
	return 0;

err_up:
	pci_disable_msi(adapter->pdev);
	/* free rx_buffers */
	atl1_clean_rx_ring(adapter);
	return err;
}
void atl1_down(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_config_timer);
	adapter->phy_timer_pending = false;

	atlx_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);
	atl1_reset_hw(&adapter->hw);
	adapter->cmb.cmb->int_stats = 0;

	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);
}

/*
 * atl1_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1_open(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = atl1_setup_ring_resources(adapter);
	if (err)
		return err;

	err = atl1_up(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	atl1_reset(adapter);
	return err;
}

/*
 * atl1_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1_close(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_down(adapter);
	atl1_free_ring_resources(adapter);
	return 0;
}

#ifdef CONFIG_PM
static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u32 ctrl = 0;
	u32 wufc = adapter->wol;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		atl1_down(adapter);

	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	if (ctrl & BMSR_LSTATUS)
		wufc &= ~ATLX_WUFC_LNKC;

	/* reduce speed to 10/100M */
	if (wufc) {
		atl1_phy_enter_power_saving(hw);
		/* on resume, let the driver re-setup the link */
		hw->phy_configured = false;
		atl1_set_mac_addr(hw);
		atlx_set_multi(netdev);

		ctrl = 0;
		/* turn on magic packet wol */
		if (wufc & ATLX_WUFC_MAG)
			ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;

		/* turn on Link change WOL */
		if (wufc & ATLX_WUFC_LNKC)
			ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);

		/* turn on all-multi mode if wake on multicast is enabled */
		ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
		ctrl &= ~MAC_CTRL_DBG;
		ctrl &= ~MAC_CTRL_PROMIS_EN;
		if (wufc & ATLX_WUFC_MC)
			ctrl |= MAC_CTRL_MC_ALL_EN;
		else
			ctrl &= ~MAC_CTRL_MC_ALL_EN;

		/* turn on broadcast mode if wake-on-BC is enabled */
		if (wufc & ATLX_WUFC_BC)
			ctrl |= MAC_CTRL_BC_EN;
		else
			ctrl &= ~MAC_CTRL_BC_EN;

		/* enable RX */
		ctrl |= MAC_CTRL_RX_EN;
		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);

	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
static int atl1_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* FIXME: check and handle */
	err = pci_enable_device(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
	atl1_reset(adapter);

	if (netif_running(netdev))
		atl1_up(adapter);
	netif_device_attach(netdev);

	atl1_via_workaround(adapter);

	return 0;
}
#else
#define atl1_suspend NULL
#define atl1_resume NULL
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void atl1_poll_controller(struct net_device *netdev)
{
	disable_irq(netdev->irq);
	atl1_intr(netdev->irq, netdev);
	enable_irq(netdev->irq);
}
#endif

/*
 * atl1_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int __devinit atl1_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl1_adapter *adapter;
	static int cards_found = 0;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/*
	 * The atl1 chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used at a time.
	 *
	 * Supporting 64-bit DMA on this hardware is more trouble than it's
	 * worth. It is far easier to limit to 32-bit DMA than update
	 * various kernel subsystems to support the mechanics required by a
	 * fixed-high-32-bit system.
	 */
	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_dma;
	}
	/*
	 * Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl1_driver_name
	 */
	err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
	if (err)
		goto err_request_regions;

	/*
	 * Enables bus-mastering on the device and calls
	 * pcibios_set_master to do the needed arch specific settings
	 */
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct atl1_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;

	adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
	if (!adapter->hw.hw_addr) {
		err = -ENOMEM;
		goto err_pci_iomap;
	}
	/* get device revision number */
	adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
		(REG_MASTER_CTRL + 2));
	dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
2074 /* set default ring resource counts */
2075 adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
2076 adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
2078 adapter->mii.dev = netdev;
2079 adapter->mii.mdio_read = mdio_read;
2080 adapter->mii.mdio_write = mdio_write;
2081 adapter->mii.phy_id_mask = 0x1f;
2082 adapter->mii.reg_num_mask = 0x1f;
2084 netdev->open = &atl1_open;
2085 netdev->stop = &atl1_close;
2086 netdev->hard_start_xmit = &atl1_xmit_frame;
2087 netdev->get_stats = &atlx_get_stats;
2088 netdev->set_multicast_list = &atlx_set_multi;
2089 netdev->set_mac_address = &atl1_set_mac;
2090 netdev->change_mtu = &atl1_change_mtu;
2091 netdev->do_ioctl = &atlx_ioctl;
2092 netdev->tx_timeout = &atlx_tx_timeout;
2093 netdev->watchdog_timeo = 5 * HZ;
2094 #ifdef CONFIG_NET_POLL_CONTROLLER
2095 netdev->poll_controller = atl1_poll_controller;
2097 netdev->vlan_rx_register = atlx_vlan_rx_register;
2099 netdev->ethtool_ops = &atl1_ethtool_ops;
2100 adapter->bd_number = cards_found;
2102 /* setup the private structure */
2103 err = atl1_sw_init(adapter);
2107 netdev->features = NETIF_F_HW_CSUM;
2108 netdev->features |= NETIF_F_SG;
2109 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2110 netdev->features |= NETIF_F_TSO;
2111 netdev->features |= NETIF_F_LLTX;
2114 * patch for some early revisions of the L1 chip;
2115 * the final version of the L1 may not need it
2118 /* atl1_pcie_patch(adapter); */
2120 /* really reset GPHY core */
2121 iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2124 * reset the controller to
2125 * put the device in a known good starting state
2127 if (atl1_reset_hw(&adapter->hw)) {
2132 /* copy the MAC address out of the EEPROM */
2133 atl1_read_mac_addr(&adapter->hw);
2134 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2136 if (!is_valid_ether_addr(netdev->dev_addr)) {
2141 atl1_check_options(adapter);
2143 /* pre-init the MAC, and setup link */
2144 err = atl1_init_hw(&adapter->hw);
2150 atl1_pcie_patch(adapter);
2151 /* assume we have no link for now */
2152 netif_carrier_off(netdev);
2153 netif_stop_queue(netdev);
2155 init_timer(&adapter->watchdog_timer);
2156 adapter->watchdog_timer.function = &atl1_watchdog;
2157 adapter->watchdog_timer.data = (unsigned long)adapter;
2159 init_timer(&adapter->phy_config_timer);
2160 adapter->phy_config_timer.function = &atl1_phy_config;
2161 adapter->phy_config_timer.data = (unsigned long)adapter;
2162 adapter->phy_timer_pending = false;
2164 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
2166 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
2168 INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
2170 err = register_netdev(netdev);
2175 atl1_via_workaround(adapter);
2179 pci_iounmap(pdev, adapter->hw.hw_addr);
2181 free_netdev(netdev);
2183 pci_release_regions(pdev);
2185 err_request_regions:
2186 pci_disable_device(pdev);
2191 * atl1_remove - Device Removal Routine
2192 * @pdev: PCI device information struct
2194 * atl1_remove is called by the PCI subsystem to alert the driver
2195 * that it should release a PCI device. This could be caused by a
2196 * Hot-Plug event, or because the driver is going to be removed from memory.
2199 static void __devexit atl1_remove(struct pci_dev *pdev)
2201 struct net_device *netdev = pci_get_drvdata(pdev);
2202 struct atl1_adapter *adapter;
2203 /* Device not available. Return. */
2207 adapter = netdev_priv(netdev);
2210 * Some atl1 boards lack persistent storage for their MAC, and get it
2211 * from the BIOS during POST. If we've been messing with the MAC
2212 * address, we need to save the permanent one.
2214 if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
2215 memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
2217 atl1_set_mac_addr(&adapter->hw);
2220 iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2221 unregister_netdev(netdev);
2222 pci_iounmap(pdev, adapter->hw.hw_addr);
2223 pci_release_regions(pdev);
2224 free_netdev(netdev);
2225 pci_disable_device(pdev);
2228 static struct pci_driver atl1_driver = {
2229 .name = ATLX_DRIVER_NAME,
2230 .id_table = atl1_pci_tbl,
2231 .probe = atl1_probe,
2232 .remove = __devexit_p(atl1_remove),
2233 .suspend = atl1_suspend,
2234 .resume = atl1_resume
2238 * atl1_exit_module - Driver Exit Cleanup Routine
2240 * atl1_exit_module is called just before the driver is removed
2243 static void __exit atl1_exit_module(void)
2245 pci_unregister_driver(&atl1_driver);
2249 * atl1_init_module - Driver Registration Routine
2251 * atl1_init_module is the first routine called when the driver is
2252 * loaded. All it does is register with the PCI subsystem.
2254 static int __init atl1_init_module(void)
2256 return pci_register_driver(&atl1_driver);
2259 module_init(atl1_init_module);
2260 module_exit(atl1_exit_module);
2263 char stat_string[ETH_GSTRING_LEN];
2268 #define ATL1_STAT(m) \
2269 sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
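/*
* For example, ATL1_STAT(soft_stats.rx_packets) expands to the pair
*
*	sizeof(((struct atl1_adapter *)0)->soft_stats.rx_packets),
*	offsetof(struct atl1_adapter, soft_stats.rx_packets)
*
* giving atl1_get_ethtool_stats() below both the width and the location
* of each counter inside struct atl1_adapter.
*/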
2271 static struct atl1_stats atl1_gstrings_stats[] = {
2272 {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
2273 {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
2274 {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
2275 {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
2276 {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
2277 {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
2278 {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
2279 {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
2280 {"multicast", ATL1_STAT(soft_stats.multicast)},
2281 {"collisions", ATL1_STAT(soft_stats.collisions)},
2282 {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
2283 {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
2284 {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
2285 {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
2286 {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
2287 {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
2288 {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
2289 {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
2290 {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
2291 {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
2292 {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
2293 {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
2294 {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
2295 {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
2296 {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
2297 {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
2298 {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
2299 {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
2300 {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
2301 {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
2302 {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
2305 static void atl1_get_ethtool_stats(struct net_device *netdev,
2306 struct ethtool_stats *stats, u64 *data)
2308 struct atl1_adapter *adapter = netdev_priv(netdev);
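/*
* Walk the stats table: each entry's stat_offset locates the counter
* inside the adapter structure, and sizeof_stat selects a 64- or 32-bit
* load before the value is widened into the u64 slots ethtool expects.
*/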
2312 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2313 p = (char *)adapter + atl1_gstrings_stats[i].stat_offset;
2314 data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
2315 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2320 static int atl1_get_sset_count(struct net_device *netdev, int sset)
2324 return ARRAY_SIZE(atl1_gstrings_stats);
2330 static int atl1_get_settings(struct net_device *netdev,
2331 struct ethtool_cmd *ecmd)
2333 struct atl1_adapter *adapter = netdev_priv(netdev);
2334 struct atl1_hw *hw = &adapter->hw;
2336 ecmd->supported = (SUPPORTED_10baseT_Half |
2337 SUPPORTED_10baseT_Full |
2338 SUPPORTED_100baseT_Half |
2339 SUPPORTED_100baseT_Full |
2340 SUPPORTED_1000baseT_Full |
2341 SUPPORTED_Autoneg | SUPPORTED_TP);
2342 ecmd->advertising = ADVERTISED_TP;
2343 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2344 hw->media_type == MEDIA_TYPE_1000M_FULL) {
2345 ecmd->advertising |= ADVERTISED_Autoneg;
2346 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
2348 ecmd->advertising |=
2349 (ADVERTISED_10baseT_Half |
2350 ADVERTISED_10baseT_Full |
2351 ADVERTISED_100baseT_Half |
2352 ADVERTISED_100baseT_Full |
2353 ADVERTISED_1000baseT_Full);
2355 ecmd->advertising |= (ADVERTISED_1000baseT_Full);
2357 ecmd->port = PORT_TP;
2358 ecmd->phy_address = 0;
2359 ecmd->transceiver = XCVR_INTERNAL;
2361 if (netif_carrier_ok(adapter->netdev)) {
2362 u16 link_speed, link_duplex;
2363 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
2364 ecmd->speed = link_speed;
2365 if (link_duplex == FULL_DUPLEX)
2366 ecmd->duplex = DUPLEX_FULL;
2368 ecmd->duplex = DUPLEX_HALF;
2373 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2374 hw->media_type == MEDIA_TYPE_1000M_FULL)
2375 ecmd->autoneg = AUTONEG_ENABLE;
2377 ecmd->autoneg = AUTONEG_DISABLE;
2382 static int atl1_set_settings(struct net_device *netdev,
2383 struct ethtool_cmd *ecmd)
2385 struct atl1_adapter *adapter = netdev_priv(netdev);
2386 struct atl1_hw *hw = &adapter->hw;
2389 u16 old_media_type = hw->media_type;
2391 if (netif_running(adapter->netdev)) {
2392 dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
2396 if (ecmd->autoneg == AUTONEG_ENABLE)
2397 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
2399 if (ecmd->speed == SPEED_1000) {
2400 if (ecmd->duplex != DUPLEX_FULL) {
2401 dev_warn(&adapter->pdev->dev,
2402 "can't force to 1000M half duplex\n");
2406 hw->media_type = MEDIA_TYPE_1000M_FULL;
2407 } else if (ecmd->speed == SPEED_100) {
2408 if (ecmd->duplex == DUPLEX_FULL)
2409 hw->media_type = MEDIA_TYPE_100M_FULL;
2411 hw->media_type = MEDIA_TYPE_100M_HALF;
2413 if (ecmd->duplex == DUPLEX_FULL)
2414 hw->media_type = MEDIA_TYPE_10M_FULL;
2416 hw->media_type = MEDIA_TYPE_10M_HALF;
2419 switch (hw->media_type) {
2420 case MEDIA_TYPE_AUTO_SENSOR:
2422 ADVERTISED_10baseT_Half |
2423 ADVERTISED_10baseT_Full |
2424 ADVERTISED_100baseT_Half |
2425 ADVERTISED_100baseT_Full |
2426 ADVERTISED_1000baseT_Full |
2427 ADVERTISED_Autoneg | ADVERTISED_TP;
2429 case MEDIA_TYPE_1000M_FULL:
2431 ADVERTISED_1000baseT_Full |
2432 ADVERTISED_Autoneg | ADVERTISED_TP;
2435 ecmd->advertising = 0;
2438 if (atl1_phy_setup_autoneg_adv(hw)) {
2440 dev_warn(&adapter->pdev->dev,
2441 "invalid ethtool speed/duplex setting\n");
2444 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2445 hw->media_type == MEDIA_TYPE_1000M_FULL)
2446 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2448 switch (hw->media_type) {
2449 case MEDIA_TYPE_100M_FULL:
2451 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
2454 case MEDIA_TYPE_100M_HALF:
2455 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2457 case MEDIA_TYPE_10M_FULL:
2459 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
2462 /* MEDIA_TYPE_10M_HALF: */
2463 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2467 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2470 hw->media_type = old_media_type;
2472 if (netif_running(adapter->netdev)) {
2473 dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
2475 } else if (!ret_val) {
2476 dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
2477 atl1_reset(adapter);
2482 static void atl1_get_drvinfo(struct net_device *netdev,
2483 struct ethtool_drvinfo *drvinfo)
2485 struct atl1_adapter *adapter = netdev_priv(netdev);
2487 strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
2488 strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
2489 sizeof(drvinfo->version));
2490 strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2491 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
2492 sizeof(drvinfo->bus_info));
2493 drvinfo->eedump_len = ATL1_EEDUMP_LEN;
2496 static void atl1_get_wol(struct net_device *netdev,
2497 struct ethtool_wolinfo *wol)
2499 struct atl1_adapter *adapter = netdev_priv(netdev);
2501 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
2503 if (adapter->wol & ATLX_WUFC_EX)
2504 wol->wolopts |= WAKE_UCAST;
2505 if (adapter->wol & ATLX_WUFC_MC)
2506 wol->wolopts |= WAKE_MCAST;
2507 if (adapter->wol & ATLX_WUFC_BC)
2508 wol->wolopts |= WAKE_BCAST;
2509 if (adapter->wol & ATLX_WUFC_MAG)
2510 wol->wolopts |= WAKE_MAGIC;
2514 static int atl1_set_wol(struct net_device *netdev,
2515 struct ethtool_wolinfo *wol)
2517 struct atl1_adapter *adapter = netdev_priv(netdev);
2519 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2522 if (wol->wolopts & WAKE_UCAST)
2523 adapter->wol |= ATLX_WUFC_EX;
2524 if (wol->wolopts & WAKE_MCAST)
2525 adapter->wol |= ATLX_WUFC_MC;
2526 if (wol->wolopts & WAKE_BCAST)
2527 adapter->wol |= ATLX_WUFC_BC;
2528 if (wol->wolopts & WAKE_MAGIC)
2529 adapter->wol |= ATLX_WUFC_MAG;
2533 static int atl1_get_regs_len(struct net_device *netdev)
2535 return ATL1_REG_COUNT * sizeof(u32);
2538 static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
2541 struct atl1_adapter *adapter = netdev_priv(netdev);
2542 struct atl1_hw *hw = &adapter->hw;
2546 for (i = 0; i < ATL1_REG_COUNT; i++) {
2548 * This switch statement avoids reserved regions
2549 * of register space.
2574 /* reserved region; don't read it */
2578 /* unreserved region */
2579 regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
2584 static void atl1_get_ringparam(struct net_device *netdev,
2585 struct ethtool_ringparam *ring)
2587 struct atl1_adapter *adapter = netdev_priv(netdev);
2588 struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
2589 struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
2591 ring->rx_max_pending = ATL1_MAX_RFD;
2592 ring->tx_max_pending = ATL1_MAX_TPD;
2593 ring->rx_mini_max_pending = 0;
2594 ring->rx_jumbo_max_pending = 0;
2595 ring->rx_pending = rxdr->count;
2596 ring->tx_pending = txdr->count;
2597 ring->rx_mini_pending = 0;
2598 ring->rx_jumbo_pending = 0;
2601 static int atl1_set_ringparam(struct net_device *netdev,
2602 struct ethtool_ringparam *ring)
2604 struct atl1_adapter *adapter = netdev_priv(netdev);
2605 struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
2606 struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
2607 struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
2609 struct atl1_tpd_ring tpd_old, tpd_new;
2610 struct atl1_rfd_ring rfd_old, rfd_new;
2611 struct atl1_rrd_ring rrd_old, rrd_new;
2612 struct atl1_ring_header rhdr_old, rhdr_new;
2615 tpd_old = adapter->tpd_ring;
2616 rfd_old = adapter->rfd_ring;
2617 rrd_old = adapter->rrd_ring;
2618 rhdr_old = adapter->ring_header;
2620 if (netif_running(adapter->netdev))
2623 rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
2624 rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : rfdr->count;
2626 rfdr->count = (rfdr->count + 3) & ~3;
2627 rrdr->count = rfdr->count;
2629 tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
2630 tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : tpdr->count;
2632 tpdr->count = (tpdr->count + 3) & ~3;
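/*
* (count + 3) & ~3 rounds the requested count up to the next multiple
* of four, which the descriptor rings require; e.g. a requested count
* of 301, 302 or 303 becomes 304, while 304 is left unchanged.
*/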
2634 if (netif_running(adapter->netdev)) {
2635 /* try to get new resources before deleting old */
2636 err = atl1_setup_ring_resources(adapter);
2638 goto err_setup_ring;
2641 * save the new, restore the old in order to free it,
2642 * then restore the new back again
2645 rfd_new = adapter->rfd_ring;
2646 rrd_new = adapter->rrd_ring;
2647 tpd_new = adapter->tpd_ring;
2648 rhdr_new = adapter->ring_header;
2649 adapter->rfd_ring = rfd_old;
2650 adapter->rrd_ring = rrd_old;
2651 adapter->tpd_ring = tpd_old;
2652 adapter->ring_header = rhdr_old;
2653 atl1_free_ring_resources(adapter);
2654 adapter->rfd_ring = rfd_new;
2655 adapter->rrd_ring = rrd_new;
2656 adapter->tpd_ring = tpd_new;
2657 adapter->ring_header = rhdr_new;
2659 err = atl1_up(adapter);
2666 adapter->rfd_ring = rfd_old;
2667 adapter->rrd_ring = rrd_old;
2668 adapter->tpd_ring = tpd_old;
2669 adapter->ring_header = rhdr_old;
2674 static void atl1_get_pauseparam(struct net_device *netdev,
2675 struct ethtool_pauseparam *epause)
2677 struct atl1_adapter *adapter = netdev_priv(netdev);
2678 struct atl1_hw *hw = &adapter->hw;
2680 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2681 hw->media_type == MEDIA_TYPE_1000M_FULL) {
2682 epause->autoneg = AUTONEG_ENABLE;
2684 epause->autoneg = AUTONEG_DISABLE;
2686 epause->rx_pause = 1;
2687 epause->tx_pause = 1;
2690 static int atl1_set_pauseparam(struct net_device *netdev,
2691 struct ethtool_pauseparam *epause)
2693 struct atl1_adapter *adapter = netdev_priv(netdev);
2694 struct atl1_hw *hw = &adapter->hw;
2696 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2697 hw->media_type == MEDIA_TYPE_1000M_FULL) {
2698 epause->autoneg = AUTONEG_ENABLE;
2700 epause->autoneg = AUTONEG_DISABLE;
2703 epause->rx_pause = 1;
2704 epause->tx_pause = 1;
2709 /* FIXME: is this right? -- CHS */
2710 static u32 atl1_get_rx_csum(struct net_device *netdev)
2715 static void atl1_get_strings(struct net_device *netdev, u32 stringset,
2721 switch (stringset) {
2723 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2724 memcpy(p, atl1_gstrings_stats[i].stat_string,
2726 p += ETH_GSTRING_LEN;
2732 static int atl1_nway_reset(struct net_device *netdev)
2734 struct atl1_adapter *adapter = netdev_priv(netdev);
2735 struct atl1_hw *hw = &adapter->hw;
2737 if (netif_running(netdev)) {
2741 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2742 hw->media_type == MEDIA_TYPE_1000M_FULL) {
2743 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2745 switch (hw->media_type) {
2746 case MEDIA_TYPE_100M_FULL:
2747 phy_data = MII_CR_FULL_DUPLEX |
2748 MII_CR_SPEED_100 | MII_CR_RESET;
2750 case MEDIA_TYPE_100M_HALF:
2751 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2753 case MEDIA_TYPE_10M_FULL:
2754 phy_data = MII_CR_FULL_DUPLEX |
2755 MII_CR_SPEED_10 | MII_CR_RESET;
2758 /* MEDIA_TYPE_10M_HALF */
2759 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2762 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2768 const struct ethtool_ops atl1_ethtool_ops = {
2769 .get_settings = atl1_get_settings,
2770 .set_settings = atl1_set_settings,
2771 .get_drvinfo = atl1_get_drvinfo,
2772 .get_wol = atl1_get_wol,
2773 .set_wol = atl1_set_wol,
2774 .get_regs_len = atl1_get_regs_len,
2775 .get_regs = atl1_get_regs,
2776 .get_ringparam = atl1_get_ringparam,
2777 .set_ringparam = atl1_set_ringparam,
2778 .get_pauseparam = atl1_get_pauseparam,
2779 .set_pauseparam = atl1_set_pauseparam,
2780 .get_rx_csum = atl1_get_rx_csum,
2781 .set_tx_csum = ethtool_op_set_tx_hw_csum,
2782 .get_link = ethtool_op_get_link,
2783 .set_sg = ethtool_op_set_sg,
2784 .get_strings = atl1_get_strings,
2785 .nway_reset = atl1_nway_reset,
2786 .get_ethtool_stats = atl1_get_ethtool_stats,
2787 .get_sset_count = atl1_get_sset_count,
2788 .set_tso = ethtool_op_set_tso,
2792 * Reset the transmit and receive units; mask and clear all interrupts.
2793 * hw - Struct containing variables accessed by shared code
2794 * returns 0 on success, or the idle status register value on error
2796 s32 atl1_reset_hw(struct atl1_hw *hw)
2798 struct pci_dev *pdev = hw->back->pdev;
2803 * Clear Interrupt mask to stop board from generating
2804 * interrupts, and clear any pending interrupt events
2807 * iowrite32(0, hw->hw_addr + REG_IMR);
2808 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
2812 * Issue Soft Reset to the MAC. This will reset the chip's
2813 * transmit, receive, and DMA engines. It will not affect
2814 * the current PCI configuration. The global reset bit is self-
2815 * clearing, and should clear within a microsecond.
2817 iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
2818 ioread32(hw->hw_addr + REG_MASTER_CTRL);
2820 iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
2821 ioread16(hw->hw_addr + REG_PHY_ENABLE);
2823 /* delay about 1ms */
2826 /* wait at least 10 ms for all modules to be idle */
2827 for (i = 0; i < 10; i++) {
2828 icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
2833 /* FIXME: still the right way to do this? */
2838 dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
2845 /* EEPROM support functions
2847 * check_eeprom_exist
2848 * returns 0 if the EEPROM exists
2850 static int atl1_check_eeprom_exist(struct atl1_hw *hw)
2853 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2854 if (value & SPI_FLASH_CTRL_EN_VPD) {
2855 value &= ~SPI_FLASH_CTRL_EN_VPD;
2856 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2859 value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
2860 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2863 static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
2869 /* address is not properly aligned */
2872 iowrite32(0, hw->hw_addr + REG_VPD_DATA);
2873 control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
2874 iowrite32(control, hw->hw_addr + REG_VPD_CAP);
2875 ioread32(hw->hw_addr + REG_VPD_CAP);
2877 for (i = 0; i < 10; i++) {
2879 control = ioread32(hw->hw_addr + REG_VPD_CAP);
2880 if (control & VPD_CAP_VPD_FLAG)
2883 if (control & VPD_CAP_VPD_FLAG) {
2884 *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
2892 * Reads the value from a PHY register
2893 * hw - Struct containing variables accessed by shared code
2894 * reg_addr - address of the PHY register to read
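*
* The control word written to REG_MDIO_CTRL packs the register address
* together with MDIO_START, MDIO_SUP_PREAMBLE, the read flag (MDIO_RW)
* and the clock-select field; the poll loop below then waits for
* MDIO_START and MDIO_BUSY to clear, at which point the low 16 bits of
* the control register hold the data read back.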
2896 s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
2901 val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
2902 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
2904 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
2905 ioread32(hw->hw_addr + REG_MDIO_CTRL);
2907 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2909 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
2910 if (!(val & (MDIO_START | MDIO_BUSY)))
2913 if (!(val & (MDIO_START | MDIO_BUSY))) {
2914 *phy_data = (u16) val;
2917 return ATLX_ERR_PHY;
2920 #define CUSTOM_SPI_CS_SETUP 2
2921 #define CUSTOM_SPI_CLK_HI 2
2922 #define CUSTOM_SPI_CLK_LO 2
2923 #define CUSTOM_SPI_CS_HOLD 2
2924 #define CUSTOM_SPI_CS_HI 3
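/*
* These appear to be SPI timing parameters, expressed in flash-controller
* clock periods: chip-select setup, clock high and low times, and
* chip-select hold and inactive times. atl1_spi_read() below packs them
* into REG_SPI_FLASH_CTRL.
*/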
2926 static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
2931 iowrite32(0, hw->hw_addr + REG_SPI_DATA);
2932 iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
2934 value = SPI_FLASH_CTRL_WAIT_READY |
2935 (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
2936 SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
2937 SPI_FLASH_CTRL_CLK_HI_MASK) <<
2938 SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
2939 SPI_FLASH_CTRL_CLK_LO_MASK) <<
2940 SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
2941 SPI_FLASH_CTRL_CS_HOLD_MASK) <<
2942 SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
2943 SPI_FLASH_CTRL_CS_HI_MASK) <<
2944 SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
2945 SPI_FLASH_CTRL_INS_SHIFT;
2947 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2949 value |= SPI_FLASH_CTRL_START;
2950 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2951 ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2953 for (i = 0; i < 10; i++) {
2955 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2956 if (!(value & SPI_FLASH_CTRL_START))
2960 if (value & SPI_FLASH_CTRL_START)
2963 *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
2969 * get_permanent_address
2970 * returns 0 if a valid MAC address was obtained
2972 static int atl1_get_permanent_address(struct atl1_hw *hw)
2977 u8 eth_addr[ETH_ALEN];
2980 if (is_valid_ether_addr(hw->perm_mac_addr))
2984 addr[0] = addr[1] = 0;
2986 if (!atl1_check_eeprom_exist(hw)) {
2989 /* Read out all EEPROM content */
2992 if (atl1_read_eeprom(hw, i + 0x100, &control)) {
2994 if (reg == REG_MAC_STA_ADDR)
2996 else if (reg == (REG_MAC_STA_ADDR + 4))
2999 } else if ((control & 0xff) == 0x5A) {
3001 reg = (u16) (control >> 16);
3010 *(u32 *) &eth_addr[2] = swab32(addr[0]);
3011 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3012 if (is_valid_ether_addr(eth_addr)) {
3013 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3019 /* no EEPROM; see if the address is in SPI flash */
3020 addr[0] = addr[1] = 0;
3025 if (atl1_spi_read(hw, i + 0x1f000, &control)) {
3027 if (reg == REG_MAC_STA_ADDR)
3029 else if (reg == (REG_MAC_STA_ADDR + 4))
3032 } else if ((control & 0xff) == 0x5A) {
3034 reg = (u16) (control >> 16);
3044 *(u32 *) &eth_addr[2] = swab32(addr[0]);
3045 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3046 if (is_valid_ether_addr(eth_addr)) {
3047 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3052 * On some motherboards, the MAC address is written by the
3053 * BIOS directly to the MAC register during POST, and is
3054 * not stored in EEPROM. If all else thus far has failed
3055 * to fetch the permanent MAC address, try reading it directly.
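*
* The registers hold the address in reversed byte order; e.g. a MAC of
* 00:0B:6A:F6:00:DC reads back as addr[0] = 0x6AF600DC and
* addr[1] = 0x0000000B, which the swab32()/swab16() pair below restores
* to wire order (cf. the example in atl1_set_mac_addr()).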
3057 addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
3058 addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
3059 *(u32 *) &eth_addr[2] = swab32(addr[0]);
3060 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3061 if (is_valid_ether_addr(eth_addr)) {
3062 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3070 * Reads the adapter's MAC address from the EEPROM
3071 * hw - Struct containing variables accessed by shared code
3073 s32 atl1_read_mac_addr(struct atl1_hw *hw)
3077 if (atl1_get_permanent_address(hw))
3078 random_ether_addr(hw->perm_mac_addr);
3080 for (i = 0; i < ETH_ALEN; i++)
3081 hw->mac_addr[i] = hw->perm_mac_addr[i];
3086 * Hashes an address to determine its location in the multicast table
3087 * hw - Struct containing variables accessed by shared code
3088 * mc_addr - the multicast address to hash
3092 * set hash value for a multicast address
3093 * hash calculation:
3094 * 1. compute the 32-bit CRC of the multicast address
3095 * 2. reverse the bit order of the CRC (MSB to LSB)
3097 u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
3099 u32 crc32, value = 0;
3102 crc32 = ether_crc_le(6, mc_addr);
3103 for (i = 0; i < 32; i++)
3104 value |= (((crc32 >> i) & 1) << (31 - i));
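/*
* e.g. a CRC of 0x12345678 bit-reverses to 0x1E6A2C48; only the top six
* bits of the result are consumed by atl1_hash_set()
*/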
3110 * Sets the bit in the multicast table corresponding to the hash value.
3111 * hw - Struct containing variables accessed by shared code
3112 * hash_value - Multicast address hash value
3114 void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
3116 u32 hash_bit, hash_reg;
3120 * The HASH Table is a register array of 2 32-bit registers.
3121 * It is treated like an array of 64 bits. We want to set
3122 * bit BitArray[hash_value]. So we figure out what register
3123 * the bit is in, read it, OR in the new bit, then write
3124 * back the new value. The register is determined by the
3125 * most significant bit of the hash value, and the bit within
3126 * that register is determined by the next five bits (30:26).
3128 hash_reg = (hash_value >> 31) & 0x1;
3129 hash_bit = (hash_value >> 26) & 0x1F;
3130 mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
3131 mta |= (1 << hash_bit);
3132 iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
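/*
* Typical usage (sketch): for each multicast address mc_addr,
*
*	atl1_hash_set(hw, atl1_hash_mc_addr(hw, mc_addr));
*
* e.g. a hash value of 0xE0000000 selects register 1, bit 24.
*/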
3136 * Writes a value to a PHY register
3137 * hw - Struct containing variables accessed by shared code
3138 * reg_addr - address of the PHY register to write
3139 * data - data to write to the PHY
3141 s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
3146 val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
3147 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
3149 MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
3150 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
3151 ioread32(hw->hw_addr + REG_MDIO_CTRL);
3153 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
3155 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3156 if (!(val & (MDIO_START | MDIO_BUSY)))
3160 if (!(val & (MDIO_START | MDIO_BUSY)))
3163 return ATLX_ERR_PHY;
3167 * Takes the L001's PHY out of its power-saving state (hardware bug)
3168 * hw - Struct containing variables accessed by shared code
3169 * at power-on, the L001's PHY always comes up in a power-saving state
3170 * in which gigabit link is unavailable
3172 static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
3175 ret = atl1_write_phy_reg(hw, 29, 0x0029);
3178 return atl1_write_phy_reg(hw, 30, 0);
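/*
* Registers 29 and 30 appear to form a vendor-specific debug
* address/data pair: selecting offset 0x0029 and then writing 0
* presumably clears the power-saving control. This is inferred from the
* magic numbers rather than from documentation.
*/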
3182 * TODO: do something or get rid of this
3184 s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
3191 ret_val = atl1_write_phy_reg(hw, ...);
3192 ret_val = atl1_write_phy_reg(hw, ...);
3199 * Resets the PHY and makes all configuration take effect
3200 * hw - Struct containing variables accessed by shared code
3202 * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
3204 static s32 atl1_phy_reset(struct atl1_hw *hw)
3206 struct pci_dev *pdev = hw->back->pdev;
3210 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3211 hw->media_type == MEDIA_TYPE_1000M_FULL)
3212 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3214 switch (hw->media_type) {
3215 case MEDIA_TYPE_100M_FULL:
3217 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
3220 case MEDIA_TYPE_100M_HALF:
3221 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3223 case MEDIA_TYPE_10M_FULL:
3225 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
3228 /* MEDIA_TYPE_10M_HALF: */
3229 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3234 ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3238 /* pcie serdes link may be down! */
3239 dev_dbg(&pdev->dev, "pcie phy link down\n");
3241 for (i = 0; i < 25; i++) {
3243 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3244 if (!(val & (MDIO_START | MDIO_BUSY)))
3248 if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
3249 dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
3257 * Configures PHY autoneg and flow control advertisement settings
3258 * hw - Struct containing variables accessed by shared code
3260 s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
3263 s16 mii_autoneg_adv_reg;
3264 s16 mii_1000t_ctrl_reg;
3266 /* MII Auto-Neg Advertisement Register (Address 4): start from default caps */
3267 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
3269 /* MII 1000Base-T Control Register (Address 9): start from default caps */
3270 mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
3273 * First we clear all the 10/100 mb speed bits in the Auto-Neg
3274 * Advertisement Register (Address 4) and the 1000 mb speed bits in
3275 * the 1000Base-T Control Register (Address 9).
3277 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
3278 mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
3281 * Need to parse media_type and set up
3282 * the appropriate PHY registers.
3284 switch (hw->media_type) {
3285 case MEDIA_TYPE_AUTO_SENSOR:
3286 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
3287 MII_AR_10T_FD_CAPS |
3288 MII_AR_100TX_HD_CAPS |
3289 MII_AR_100TX_FD_CAPS);
3290 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3293 case MEDIA_TYPE_1000M_FULL:
3294 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3297 case MEDIA_TYPE_100M_FULL:
3298 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
3301 case MEDIA_TYPE_100M_HALF:
3302 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
3305 case MEDIA_TYPE_10M_FULL:
3306 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
3310 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
3314 /* flow control fixed on: advertise symmetric and asymmetric pause */
3315 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
3317 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
3318 hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
3320 ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
3324 ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
3332 * Configures link settings.
3333 * hw - Struct containing variables accessed by shared code
3334 * Assumes the hardware has previously been reset and the
3335 * transmitter and receiver are not enabled.
3337 static s32 atl1_setup_link(struct atl1_hw *hw)
3339 struct pci_dev *pdev = hw->back->pdev;
3344 * the PHY will advertise the value(s) parsed from
3345 * autoneg_advertised and fc;
3346 * regardless of the autoneg setting, we do not wait for the link result.
3348 ret_val = atl1_phy_setup_autoneg_adv(hw);
3350 dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
3353 /* software reset; enable autoneg if needed */
3354 ret_val = atl1_phy_reset(hw);
3356 dev_dbg(&pdev->dev, "error resetting phy\n");
3359 hw->phy_configured = true;
3363 static void atl1_init_flash_opcode(struct atl1_hw *hw)
3365 if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
3367 hw->flash_vendor = 0;
3370 iowrite8(flash_table[hw->flash_vendor].cmd_program,
3371 hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
3372 iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
3373 hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
3374 iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
3375 hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
3376 iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
3377 hw->hw_addr + REG_SPI_FLASH_OP_RDID);
3378 iowrite8(flash_table[hw->flash_vendor].cmd_wren,
3379 hw->hw_addr + REG_SPI_FLASH_OP_WREN);
3380 iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
3381 hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
3382 iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
3383 hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
3384 iowrite8(flash_table[hw->flash_vendor].cmd_read,
3385 hw->hw_addr + REG_SPI_FLASH_OP_READ);
3389 * Performs basic configuration of the adapter.
3390 * hw - Struct containing variables accessed by shared code
3391 * Assumes that the controller has previously been reset and is in a
3392 * post-reset uninitialized state. Initializes the multicast table
3393 * and calls routines to set up the link.
3394 * Leaves the transmit and receive units disabled and uninitialized.
3396 s32 atl1_init_hw(struct atl1_hw *hw)
3400 /* Zero out the Multicast HASH table */
3401 iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
3402 /* clear the old settings from the multicast hash table */
3403 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
3405 atl1_init_flash_opcode(hw);
3407 if (!hw->phy_configured) {
3408 /* enable GPHY LinkChange interrupt */
3409 ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
3412 /* make PHY out of power-saving state */
3413 ret_val = atl1_phy_leave_power_saving(hw);
3416 /* Call a subroutine to configure the link */
3417 ret_val = atl1_setup_link(hw);
3423 * Detects the current speed and duplex settings of the hardware.
3424 * hw - Struct containing variables accessed by shared code
3425 * speed - Speed of the connection
3426 * duplex - Duplex setting of the connection
3428 s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
3430 struct pci_dev *pdev = hw->back->pdev;
3434 /* read the PHY Specific Status Register (register 17) */
3435 ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
3439 if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
3440 return ATLX_ERR_PHY_RES;
3442 switch (phy_data & MII_ATLX_PSSR_SPEED) {
3443 case MII_ATLX_PSSR_1000MBS:
3444 *speed = SPEED_1000;
3446 case MII_ATLX_PSSR_100MBS:
3449 case MII_ATLX_PSSR_10MBS:
3453 dev_dbg(&pdev->dev, "error getting speed\n");
3454 return ATLX_ERR_PHY_SPEED;
3457 if (phy_data & MII_ATLX_PSSR_DPLX)
3458 *duplex = FULL_DUPLEX;
3460 *duplex = HALF_DUPLEX;
3465 void atl1_set_mac_addr(struct atl1_hw *hw)
3470 * 0: 6AF600DC 1: 000B
3473 value = (((u32) hw->mac_addr[2]) << 24) |
3474 (((u32) hw->mac_addr[3]) << 16) |
3475 (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
3476 iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
3478 value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
3479 iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
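/*
* Worked example: for MAC 00:0B:6A:F6:00:DC the two writes above yield
* REG_MAC_STA_ADDR = 0x6AF600DC and REG_MAC_STA_ADDR + 4 = 0x0000000B,
* matching the layout noted in the comment at the top of this function.
*/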