1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.6.2"
58 #define DRV_MODULE_RELDATE "July 6, 2007"
60 #define RUN_AT(x) (jiffies + (x))
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
65 static const char version[] __devinitdata =
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int disable_msi = 0;
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 /* indexed by board_t, above */
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105 static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
127 static struct flash_spec flash_table[] =
130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
202 	/* Atmel Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
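	 * For example, with TX_DESC_CNT == 256: when the producer is a
	 * full ring ahead of the consumer, the raw difference is 256,
	 * which is clamped to MAX_TX_DESC_CNT (255) below so a full ring
	 * stays distinguishable from an empty one (diff == 0).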
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
231 return (bp->tx_ring_size - diff);
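/* Indirect register access: the target offset is first written to the
 * register-window address register, then the data is transferred
 * through the window itself.  indirect_lock serializes the two-step
 * address/data sequence against concurrent users.
 */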
235 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
239 spin_lock_bh(&bp->indirect_lock);
240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
247 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
249 spin_lock_bh(&bp->indirect_lock);
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
252 spin_unlock_bh(&bp->indirect_lock);
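/* Context-memory write.  The 5709 goes through the CTX_CTX_DATA /
 * CTX_CTX_CTRL register pair and polls for the hardware to clear
 * WRITE_REQ; earlier chips write directly via CTX_DATA_ADR / CTX_DATA.
 */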
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
259 spin_lock_bh(&bp->indirect_lock);
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
277 spin_unlock_bh(&bp->indirect_lock);
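/* MDIO access to the PHY.  If the MAC is auto-polling the PHY, the
 * auto-poll is switched off for the duration of the manual access and
 * restored afterwards.  A transaction is kicked off by setting
 * START_BUSY and completes when the hardware clears that bit.
 */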
281 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
301 for (i = 0; i < 50; i++) {
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
358 for (i = 0; i < 50; i++) {
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
387 bnx2_disable_int(struct bnx2 *bp)
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
395 bnx2_enable_int(struct bnx2 *bp)
397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
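/* Disable interrupts and wait for any handler already running on
 * another CPU to finish.  intr_sem is bumped first so that an ISR
 * that slips in before the mask takes effect returns early.
 */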
408 bnx2_disable_int_sync(struct bnx2 *bp)
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
416 bnx2_netif_stop(struct bnx2 *bp)
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
427 bnx2_netif_start(struct bnx2 *bp)
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
439 bnx2_free_mem(struct bnx2 *bp)
443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
451 if (bp->status_blk) {
452 pci_free_consistent(bp->pdev, bp->status_stats_size,
453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
455 bp->stats_blk = NULL;
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
473 vfree(bp->rx_buf_ring);
474 bp->rx_buf_ring = NULL;
478 bnx2_alloc_mem(struct bnx2 *bp)
480 int i, status_blk_size;
482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
484 if (bp->tx_buf_ring == NULL)
487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
496 if (bp->rx_buf_ring == NULL)
499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
522 memset(bp->status_blk, 0, bp->status_stats_size);
524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
549 bnx2_report_fw_link(struct bnx2 *bp)
551 u32 fw_link_status = 0;
553 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
559 switch (bp->line_speed) {
561 if (bp->duplex == DUPLEX_HALF)
562 fw_link_status = BNX2_LINK_STATUS_10HALF;
564 fw_link_status = BNX2_LINK_STATUS_10FULL;
567 if (bp->duplex == DUPLEX_HALF)
568 fw_link_status = BNX2_LINK_STATUS_100HALF;
570 fw_link_status = BNX2_LINK_STATUS_100FULL;
573 if (bp->duplex == DUPLEX_HALF)
574 fw_link_status = BNX2_LINK_STATUS_1000HALF;
576 fw_link_status = BNX2_LINK_STATUS_1000FULL;
579 if (bp->duplex == DUPLEX_HALF)
580 fw_link_status = BNX2_LINK_STATUS_2500HALF;
582 fw_link_status = BNX2_LINK_STATUS_2500FULL;
586 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
589 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
591 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
592 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
594 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
595 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
596 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
598 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
602 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
604 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
608 bnx2_xceiver_str(struct bnx2 *bp)
610 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
611 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
616 bnx2_report_link(struct bnx2 *bp)
619 netif_carrier_on(bp->dev);
620 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
621 bnx2_xceiver_str(bp));
623 printk("%d Mbps ", bp->line_speed);
625 if (bp->duplex == DUPLEX_FULL)
626 printk("full duplex");
628 printk("half duplex");
631 if (bp->flow_ctrl & FLOW_CTRL_RX) {
632 printk(", receive ");
633 if (bp->flow_ctrl & FLOW_CTRL_TX)
634 printk("& transmit ");
637 printk(", transmit ");
639 printk("flow control ON");
644 netif_carrier_off(bp->dev);
645 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
646 bnx2_xceiver_str(bp));
649 bnx2_report_fw_link(bp);
653 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
655 u32 local_adv, remote_adv;
658 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
659 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
661 if (bp->duplex == DUPLEX_FULL) {
662 bp->flow_ctrl = bp->req_flow_ctrl;
667 if (bp->duplex != DUPLEX_FULL) {
671 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
672 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
675 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
676 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
677 bp->flow_ctrl |= FLOW_CTRL_TX;
678 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
679 bp->flow_ctrl |= FLOW_CTRL_RX;
683 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
684 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
686 if (bp->phy_flags & PHY_SERDES_FLAG) {
687 u32 new_local_adv = 0;
688 u32 new_remote_adv = 0;
690 if (local_adv & ADVERTISE_1000XPAUSE)
691 new_local_adv |= ADVERTISE_PAUSE_CAP;
692 if (local_adv & ADVERTISE_1000XPSE_ASYM)
693 new_local_adv |= ADVERTISE_PAUSE_ASYM;
694 if (remote_adv & ADVERTISE_1000XPAUSE)
695 new_remote_adv |= ADVERTISE_PAUSE_CAP;
696 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
697 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
699 local_adv = new_local_adv;
700 remote_adv = new_remote_adv;
703 /* See Table 28B-3 of 802.3ab-1999 spec. */
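	/* Resolution summary (local / remote -> result):
	 *   PAUSE           & PAUSE            -> TX and RX pause
	 *   PAUSE + ASYM    & ASYM (no PAUSE)  -> RX pause only
	 *   ASYM (no PAUSE) & PAUSE + ASYM     -> TX pause only
	 */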
704 if (local_adv & ADVERTISE_PAUSE_CAP) {
705 if(local_adv & ADVERTISE_PAUSE_ASYM) {
706 if (remote_adv & ADVERTISE_PAUSE_CAP) {
707 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
709 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
710 bp->flow_ctrl = FLOW_CTRL_RX;
714 if (remote_adv & ADVERTISE_PAUSE_CAP) {
715 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
719 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
720 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
721 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
723 bp->flow_ctrl = FLOW_CTRL_TX;
729 bnx2_5709s_linkup(struct bnx2 *bp)
735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
736 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
737 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
739 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
740 bp->line_speed = bp->req_line_speed;
741 bp->duplex = bp->req_duplex;
744 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
746 case MII_BNX2_GP_TOP_AN_SPEED_10:
747 bp->line_speed = SPEED_10;
749 case MII_BNX2_GP_TOP_AN_SPEED_100:
750 bp->line_speed = SPEED_100;
752 case MII_BNX2_GP_TOP_AN_SPEED_1G:
753 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
754 bp->line_speed = SPEED_1000;
756 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
757 bp->line_speed = SPEED_2500;
760 if (val & MII_BNX2_GP_TOP_AN_FD)
761 bp->duplex = DUPLEX_FULL;
763 bp->duplex = DUPLEX_HALF;
768 bnx2_5708s_linkup(struct bnx2 *bp)
773 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
774 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
775 case BCM5708S_1000X_STAT1_SPEED_10:
776 bp->line_speed = SPEED_10;
778 case BCM5708S_1000X_STAT1_SPEED_100:
779 bp->line_speed = SPEED_100;
781 case BCM5708S_1000X_STAT1_SPEED_1G:
782 bp->line_speed = SPEED_1000;
784 case BCM5708S_1000X_STAT1_SPEED_2G5:
785 bp->line_speed = SPEED_2500;
788 if (val & BCM5708S_1000X_STAT1_FD)
789 bp->duplex = DUPLEX_FULL;
791 bp->duplex = DUPLEX_HALF;
797 bnx2_5706s_linkup(struct bnx2 *bp)
799 u32 bmcr, local_adv, remote_adv, common;
802 bp->line_speed = SPEED_1000;
804 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
805 if (bmcr & BMCR_FULLDPLX) {
806 bp->duplex = DUPLEX_FULL;
809 bp->duplex = DUPLEX_HALF;
812 if (!(bmcr & BMCR_ANENABLE)) {
816 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
817 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
819 common = local_adv & remote_adv;
820 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
822 if (common & ADVERTISE_1000XFULL) {
823 bp->duplex = DUPLEX_FULL;
826 bp->duplex = DUPLEX_HALF;
834 bnx2_copper_linkup(struct bnx2 *bp)
838 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839 if (bmcr & BMCR_ANENABLE) {
840 u32 local_adv, remote_adv, common;
842 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
843 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
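		/* The link-partner 1000BASE-T ability bits in MII_STAT1000
		 * sit two bit positions above the matching advertisement
		 * bits in MII_CTRL1000, hence the shift before masking.
		 */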
845 common = local_adv & (remote_adv >> 2);
846 if (common & ADVERTISE_1000FULL) {
847 bp->line_speed = SPEED_1000;
848 bp->duplex = DUPLEX_FULL;
850 else if (common & ADVERTISE_1000HALF) {
851 bp->line_speed = SPEED_1000;
852 bp->duplex = DUPLEX_HALF;
855 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
856 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
858 common = local_adv & remote_adv;
859 if (common & ADVERTISE_100FULL) {
860 bp->line_speed = SPEED_100;
861 bp->duplex = DUPLEX_FULL;
863 else if (common & ADVERTISE_100HALF) {
864 bp->line_speed = SPEED_100;
865 bp->duplex = DUPLEX_HALF;
867 else if (common & ADVERTISE_10FULL) {
868 bp->line_speed = SPEED_10;
869 bp->duplex = DUPLEX_FULL;
871 else if (common & ADVERTISE_10HALF) {
872 bp->line_speed = SPEED_10;
873 bp->duplex = DUPLEX_HALF;
882 if (bmcr & BMCR_SPEED100) {
883 bp->line_speed = SPEED_100;
886 bp->line_speed = SPEED_10;
888 if (bmcr & BMCR_FULLDPLX) {
889 bp->duplex = DUPLEX_FULL;
892 bp->duplex = DUPLEX_HALF;
900 bnx2_set_mac_link(struct bnx2 *bp)
904 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
905 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
906 (bp->duplex == DUPLEX_HALF)) {
907 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
910 /* Configure the EMAC mode register. */
911 val = REG_RD(bp, BNX2_EMAC_MODE);
913 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
914 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
915 BNX2_EMAC_MODE_25G_MODE);
918 switch (bp->line_speed) {
920 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
921 val |= BNX2_EMAC_MODE_PORT_MII_10M;
926 val |= BNX2_EMAC_MODE_PORT_MII;
929 val |= BNX2_EMAC_MODE_25G_MODE;
932 val |= BNX2_EMAC_MODE_PORT_GMII;
937 val |= BNX2_EMAC_MODE_PORT_GMII;
940 /* Set the MAC to operate in the appropriate duplex mode. */
941 if (bp->duplex == DUPLEX_HALF)
942 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
943 REG_WR(bp, BNX2_EMAC_MODE, val);
945 /* Enable/disable rx PAUSE. */
946 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
948 if (bp->flow_ctrl & FLOW_CTRL_RX)
949 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
950 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
952 /* Enable/disable tx PAUSE. */
953 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
954 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
956 if (bp->flow_ctrl & FLOW_CTRL_TX)
957 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
958 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
960 /* Acknowledge the interrupt. */
961 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
967 bnx2_enable_bmsr1(struct bnx2 *bp)
969 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
970 (CHIP_NUM(bp) == CHIP_NUM_5709))
971 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
972 MII_BNX2_BLK_ADDR_GP_STATUS);
976 bnx2_disable_bmsr1(struct bnx2 *bp)
978 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
979 (CHIP_NUM(bp) == CHIP_NUM_5709))
980 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
981 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
985 bnx2_test_and_enable_2g5(struct bnx2 *bp)
990 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
993 if (bp->autoneg & AUTONEG_SPEED)
994 bp->advertising |= ADVERTISED_2500baseX_Full;
996 if (CHIP_NUM(bp) == CHIP_NUM_5709)
997 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
999 bnx2_read_phy(bp, bp->mii_up1, &up1);
1000 if (!(up1 & BCM5708S_UP1_2G5)) {
1001 up1 |= BCM5708S_UP1_2G5;
1002 bnx2_write_phy(bp, bp->mii_up1, up1);
1006 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1007 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1008 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1014 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1019 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1022 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1023 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1025 bnx2_read_phy(bp, bp->mii_up1, &up1);
1026 if (up1 & BCM5708S_UP1_2G5) {
1027 up1 &= ~BCM5708S_UP1_2G5;
1028 bnx2_write_phy(bp, bp->mii_up1, up1);
1032 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1033 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1034 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1040 bnx2_enable_forced_2g5(struct bnx2 *bp)
1044 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1047 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1050 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1051 MII_BNX2_BLK_ADDR_SERDES_DIG);
1052 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1053 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1054 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1055 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1057 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1058 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1059 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1061 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1062 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1063 bmcr |= BCM5708S_BMCR_FORCE_2500;
1066 if (bp->autoneg & AUTONEG_SPEED) {
1067 bmcr &= ~BMCR_ANENABLE;
1068 if (bp->req_duplex == DUPLEX_FULL)
1069 bmcr |= BMCR_FULLDPLX;
1071 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1075 bnx2_disable_forced_2g5(struct bnx2 *bp)
1079 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1082 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1085 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1086 MII_BNX2_BLK_ADDR_SERDES_DIG);
1087 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1088 val &= ~MII_BNX2_SD_MISC1_FORCE;
1089 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1095 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1100 if (bp->autoneg & AUTONEG_SPEED)
1101 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1102 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1106 bnx2_set_link(struct bnx2 *bp)
1111 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1116 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1119 link_up = bp->link_up;
1121 bnx2_enable_bmsr1(bp);
1122 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1123 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1124 bnx2_disable_bmsr1(bp);
1126 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1127 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1130 val = REG_RD(bp, BNX2_EMAC_STATUS);
1131 if (val & BNX2_EMAC_STATUS_LINK)
1132 bmsr |= BMSR_LSTATUS;
1134 bmsr &= ~BMSR_LSTATUS;
1137 if (bmsr & BMSR_LSTATUS) {
1140 if (bp->phy_flags & PHY_SERDES_FLAG) {
1141 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1142 bnx2_5706s_linkup(bp);
1143 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1144 bnx2_5708s_linkup(bp);
1145 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1146 bnx2_5709s_linkup(bp);
1149 bnx2_copper_linkup(bp);
1151 bnx2_resolve_flow_ctrl(bp);
1154 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1155 (bp->autoneg & AUTONEG_SPEED))
1156 bnx2_disable_forced_2g5(bp);
1158 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1162 if (bp->link_up != link_up) {
1163 bnx2_report_link(bp);
1166 bnx2_set_mac_link(bp);
1172 bnx2_reset_phy(struct bnx2 *bp)
1177 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1179 #define PHY_RESET_MAX_WAIT 100
1180 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1183 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1184 if (!(reg & BMCR_RESET)) {
1189 if (i == PHY_RESET_MAX_WAIT) {
1196 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1200 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1201 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE;
1207 adv = ADVERTISE_PAUSE_CAP;
1210 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1211 if (bp->phy_flags & PHY_SERDES_FLAG) {
1212 adv = ADVERTISE_1000XPSE_ASYM;
1215 adv = ADVERTISE_PAUSE_ASYM;
1218 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1219 if (bp->phy_flags & PHY_SERDES_FLAG) {
1220 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1223 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1229 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1232 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1234 u32 speed_arg = 0, pause_adv;
1236 pause_adv = bnx2_phy_get_pause_adv(bp);
1238 if (bp->autoneg & AUTONEG_SPEED) {
1239 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1240 if (bp->advertising & ADVERTISED_10baseT_Half)
1241 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1242 if (bp->advertising & ADVERTISED_10baseT_Full)
1243 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1244 if (bp->advertising & ADVERTISED_100baseT_Half)
1245 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1246 if (bp->advertising & ADVERTISED_100baseT_Full)
1247 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1248 if (bp->advertising & ADVERTISED_1000baseT_Full)
1249 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1250 if (bp->advertising & ADVERTISED_2500baseX_Full)
1251 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1253 if (bp->req_line_speed == SPEED_2500)
1254 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1255 else if (bp->req_line_speed == SPEED_1000)
1256 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1257 else if (bp->req_line_speed == SPEED_100) {
1258 if (bp->req_duplex == DUPLEX_FULL)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1262 } else if (bp->req_line_speed == SPEED_10) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1270 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1271 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1272 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1273 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1275 if (port == PORT_TP)
1276 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1277 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1279 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1281 spin_unlock_bh(&bp->phy_lock);
1282 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1283 spin_lock_bh(&bp->phy_lock);
1289 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1294 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1295 return (bnx2_setup_remote_phy(bp, port));
1297 if (!(bp->autoneg & AUTONEG_SPEED)) {
1299 int force_link_down = 0;
1301 if (bp->req_line_speed == SPEED_2500) {
1302 if (!bnx2_test_and_enable_2g5(bp))
1303 force_link_down = 1;
1304 } else if (bp->req_line_speed == SPEED_1000) {
1305 if (bnx2_test_and_disable_2g5(bp))
1306 force_link_down = 1;
1308 bnx2_read_phy(bp, bp->mii_adv, &adv);
1309 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1311 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1312 new_bmcr = bmcr & ~BMCR_ANENABLE;
1313 new_bmcr |= BMCR_SPEED1000;
1315 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1316 if (bp->req_line_speed == SPEED_2500)
1317 bnx2_enable_forced_2g5(bp);
1318 else if (bp->req_line_speed == SPEED_1000) {
1319 bnx2_disable_forced_2g5(bp);
1320 new_bmcr &= ~0x2000;
1323 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1324 if (bp->req_line_speed == SPEED_2500)
1325 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1327 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1330 if (bp->req_duplex == DUPLEX_FULL) {
1331 adv |= ADVERTISE_1000XFULL;
1332 new_bmcr |= BMCR_FULLDPLX;
1335 adv |= ADVERTISE_1000XHALF;
1336 new_bmcr &= ~BMCR_FULLDPLX;
1338 if ((new_bmcr != bmcr) || (force_link_down)) {
1339 /* Force a link down visible on the other side */
1341 bnx2_write_phy(bp, bp->mii_adv, adv &
1342 ~(ADVERTISE_1000XFULL |
1343 ADVERTISE_1000XHALF));
1344 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1345 BMCR_ANRESTART | BMCR_ANENABLE);
1348 netif_carrier_off(bp->dev);
1349 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1350 bnx2_report_link(bp);
1352 bnx2_write_phy(bp, bp->mii_adv, adv);
1353 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1355 bnx2_resolve_flow_ctrl(bp);
1356 bnx2_set_mac_link(bp);
1361 bnx2_test_and_enable_2g5(bp);
1363 if (bp->advertising & ADVERTISED_1000baseT_Full)
1364 new_adv |= ADVERTISE_1000XFULL;
1366 new_adv |= bnx2_phy_get_pause_adv(bp);
1368 bnx2_read_phy(bp, bp->mii_adv, &adv);
1369 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1371 bp->serdes_an_pending = 0;
1372 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1373 /* Force a link down visible on the other side */
1375 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1376 spin_unlock_bh(&bp->phy_lock);
1378 spin_lock_bh(&bp->phy_lock);
1381 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1382 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1384 /* Speed up link-up time when the link partner
1385 		 * does not autonegotiate, which is very common
1386 		 * in blade servers.  Some blade servers use
1387 		 * IPMI for keyboard input and it's important
1388 * to minimize link disruptions. Autoneg. involves
1389 * exchanging base pages plus 3 next pages and
1390 * normally completes in about 120 msec.
1392 bp->current_interval = SERDES_AN_TIMEOUT;
1393 bp->serdes_an_pending = 1;
1394 mod_timer(&bp->timer, jiffies + bp->current_interval);
1396 bnx2_resolve_flow_ctrl(bp);
1397 bnx2_set_mac_link(bp);
1403 #define ETHTOOL_ALL_FIBRE_SPEED \
1404 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1405 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1406 (ADVERTISED_1000baseT_Full)
1408 #define ETHTOOL_ALL_COPPER_SPEED \
1409 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1410 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1411 ADVERTISED_1000baseT_Full)
1413 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1414 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1416 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1419 bnx2_set_default_remote_link(struct bnx2 *bp)
1423 if (bp->phy_port == PORT_TP)
1424 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1426 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1428 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1429 bp->req_line_speed = 0;
1430 bp->autoneg |= AUTONEG_SPEED;
1431 bp->advertising = ADVERTISED_Autoneg;
1432 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1433 bp->advertising |= ADVERTISED_10baseT_Half;
1434 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1435 bp->advertising |= ADVERTISED_10baseT_Full;
1436 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1437 bp->advertising |= ADVERTISED_100baseT_Half;
1438 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1439 bp->advertising |= ADVERTISED_100baseT_Full;
1440 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1441 bp->advertising |= ADVERTISED_1000baseT_Full;
1442 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1443 bp->advertising |= ADVERTISED_2500baseX_Full;
1446 bp->advertising = 0;
1447 bp->req_duplex = DUPLEX_FULL;
1448 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1449 bp->req_line_speed = SPEED_10;
1450 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1451 bp->req_duplex = DUPLEX_HALF;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1454 bp->req_line_speed = SPEED_100;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1456 bp->req_duplex = DUPLEX_HALF;
1458 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1459 bp->req_line_speed = SPEED_1000;
1460 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1461 bp->req_line_speed = SPEED_2500;
1466 bnx2_set_default_link(struct bnx2 *bp)
1468 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1469 return bnx2_set_default_remote_link(bp);
1471 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1472 bp->req_line_speed = 0;
1473 if (bp->phy_flags & PHY_SERDES_FLAG) {
1476 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1478 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1479 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1480 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1482 bp->req_line_speed = bp->line_speed = SPEED_1000;
1483 bp->req_duplex = DUPLEX_FULL;
1486 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1490 bnx2_send_heart_beat(struct bnx2 *bp)
1495 spin_lock(&bp->indirect_lock);
1496 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1497 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1498 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1499 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1500 spin_unlock(&bp->indirect_lock);
1504 bnx2_remote_phy_event(struct bnx2 *bp)
1507 u8 link_up = bp->link_up;
1510 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1512 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1513 bnx2_send_heart_beat(bp);
1515 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1517 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1523 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1524 bp->duplex = DUPLEX_FULL;
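	/* The *HALF cases below intentionally fall through into the
	 * matching *FULL case to pick up the shared speed assignment.
	 */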
1526 case BNX2_LINK_STATUS_10HALF:
1527 bp->duplex = DUPLEX_HALF;
1528 case BNX2_LINK_STATUS_10FULL:
1529 bp->line_speed = SPEED_10;
1531 case BNX2_LINK_STATUS_100HALF:
1532 bp->duplex = DUPLEX_HALF;
1533 case BNX2_LINK_STATUS_100BASE_T4:
1534 case BNX2_LINK_STATUS_100FULL:
1535 bp->line_speed = SPEED_100;
1537 case BNX2_LINK_STATUS_1000HALF:
1538 bp->duplex = DUPLEX_HALF;
1539 case BNX2_LINK_STATUS_1000FULL:
1540 bp->line_speed = SPEED_1000;
1542 case BNX2_LINK_STATUS_2500HALF:
1543 bp->duplex = DUPLEX_HALF;
1544 case BNX2_LINK_STATUS_2500FULL:
1545 bp->line_speed = SPEED_2500;
1552 spin_lock(&bp->phy_lock);
1554 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1555 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1556 if (bp->duplex == DUPLEX_FULL)
1557 bp->flow_ctrl = bp->req_flow_ctrl;
1559 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1560 bp->flow_ctrl |= FLOW_CTRL_TX;
1561 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1562 bp->flow_ctrl |= FLOW_CTRL_RX;
1565 old_port = bp->phy_port;
1566 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1567 bp->phy_port = PORT_FIBRE;
1569 bp->phy_port = PORT_TP;
1571 if (old_port != bp->phy_port)
1572 bnx2_set_default_link(bp);
1574 spin_unlock(&bp->phy_lock);
1576 if (bp->link_up != link_up)
1577 bnx2_report_link(bp);
1579 bnx2_set_mac_link(bp);
1583 bnx2_set_remote_link(struct bnx2 *bp)
1587 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1589 case BNX2_FW_EVT_CODE_LINK_EVENT:
1590 bnx2_remote_phy_event(bp);
1592 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1594 bnx2_send_heart_beat(bp);
1601 bnx2_setup_copper_phy(struct bnx2 *bp)
1606 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1608 if (bp->autoneg & AUTONEG_SPEED) {
1609 u32 adv_reg, adv1000_reg;
1610 u32 new_adv_reg = 0;
1611 u32 new_adv1000_reg = 0;
1613 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1614 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1615 ADVERTISE_PAUSE_ASYM);
1617 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1618 adv1000_reg &= PHY_ALL_1000_SPEED;
1620 if (bp->advertising & ADVERTISED_10baseT_Half)
1621 new_adv_reg |= ADVERTISE_10HALF;
1622 if (bp->advertising & ADVERTISED_10baseT_Full)
1623 new_adv_reg |= ADVERTISE_10FULL;
1624 if (bp->advertising & ADVERTISED_100baseT_Half)
1625 new_adv_reg |= ADVERTISE_100HALF;
1626 if (bp->advertising & ADVERTISED_100baseT_Full)
1627 new_adv_reg |= ADVERTISE_100FULL;
1628 if (bp->advertising & ADVERTISED_1000baseT_Full)
1629 new_adv1000_reg |= ADVERTISE_1000FULL;
1631 new_adv_reg |= ADVERTISE_CSMA;
1633 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1635 if ((adv1000_reg != new_adv1000_reg) ||
1636 (adv_reg != new_adv_reg) ||
1637 ((bmcr & BMCR_ANENABLE) == 0)) {
1639 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1640 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1641 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1644 else if (bp->link_up) {
1645 /* Flow ctrl may have changed from auto to forced */
1646 /* or vice-versa. */
1648 bnx2_resolve_flow_ctrl(bp);
1649 bnx2_set_mac_link(bp);
1655 if (bp->req_line_speed == SPEED_100) {
1656 new_bmcr |= BMCR_SPEED100;
1658 if (bp->req_duplex == DUPLEX_FULL) {
1659 new_bmcr |= BMCR_FULLDPLX;
1661 if (new_bmcr != bmcr) {
1664 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1665 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1667 if (bmsr & BMSR_LSTATUS) {
1668 /* Force link down */
1669 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1670 spin_unlock_bh(&bp->phy_lock);
1672 spin_lock_bh(&bp->phy_lock);
1674 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1675 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1678 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1680 /* Normally, the new speed is setup after the link has
1681 * gone down and up again. In some cases, link will not go
1682 * down so we need to set up the new speed here.
1684 if (bmsr & BMSR_LSTATUS) {
1685 bp->line_speed = bp->req_line_speed;
1686 bp->duplex = bp->req_duplex;
1687 bnx2_resolve_flow_ctrl(bp);
1688 bnx2_set_mac_link(bp);
1691 bnx2_resolve_flow_ctrl(bp);
1692 bnx2_set_mac_link(bp);
1698 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1700 if (bp->loopback == MAC_LOOPBACK)
1703 if (bp->phy_flags & PHY_SERDES_FLAG) {
1704 return (bnx2_setup_serdes_phy(bp, port));
1707 return (bnx2_setup_copper_phy(bp));
1712 bnx2_init_5709s_phy(struct bnx2 *bp)
1716 bp->mii_bmcr = MII_BMCR + 0x10;
1717 bp->mii_bmsr = MII_BMSR + 0x10;
1718 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1719 bp->mii_adv = MII_ADVERTISE + 0x10;
1720 bp->mii_lpa = MII_LPA + 0x10;
1721 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1723 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1724 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1726 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1729 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1731 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1732 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1733 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1734 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1736 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1737 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1738 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1739 val |= BCM5708S_UP1_2G5;
1741 val &= ~BCM5708S_UP1_2G5;
1742 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1744 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1745 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1746 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1747 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1751 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1752 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1753 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1755 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1761 bnx2_init_5708s_phy(struct bnx2 *bp)
1767 bp->mii_up1 = BCM5708S_UP1;
1769 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1770 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1771 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1773 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1774 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1775 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1777 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1778 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1779 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1781 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1782 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1783 val |= BCM5708S_UP1_2G5;
1784 bnx2_write_phy(bp, BCM5708S_UP1, val);
1787 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1788 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1789 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1790 /* increase tx signal amplitude */
1791 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1792 BCM5708S_BLK_ADDR_TX_MISC);
1793 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1794 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1795 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1796 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1799 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1800 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1805 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1806 BNX2_SHARED_HW_CFG_CONFIG);
1807 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1808 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1809 BCM5708S_BLK_ADDR_TX_MISC);
1810 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1811 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1812 BCM5708S_BLK_ADDR_DIG);
1819 bnx2_init_5706s_phy(struct bnx2 *bp)
1823 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1825 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1826 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1828 if (bp->dev->mtu > 1500) {
1831 /* Set extended packet length bit */
1832 bnx2_write_phy(bp, 0x18, 0x7);
1833 bnx2_read_phy(bp, 0x18, &val);
1834 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1836 bnx2_write_phy(bp, 0x1c, 0x6c00);
1837 bnx2_read_phy(bp, 0x1c, &val);
1838 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1843 bnx2_write_phy(bp, 0x18, 0x7);
1844 bnx2_read_phy(bp, 0x18, &val);
1845 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1847 bnx2_write_phy(bp, 0x1c, 0x6c00);
1848 bnx2_read_phy(bp, 0x1c, &val);
1849 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1856 bnx2_init_copper_phy(struct bnx2 *bp)
1862 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1863 bnx2_write_phy(bp, 0x18, 0x0c00);
1864 bnx2_write_phy(bp, 0x17, 0x000a);
1865 bnx2_write_phy(bp, 0x15, 0x310b);
1866 bnx2_write_phy(bp, 0x17, 0x201f);
1867 bnx2_write_phy(bp, 0x15, 0x9506);
1868 bnx2_write_phy(bp, 0x17, 0x401f);
1869 bnx2_write_phy(bp, 0x15, 0x14e2);
1870 bnx2_write_phy(bp, 0x18, 0x0400);
1873 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1874 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1875 MII_BNX2_DSP_EXPAND_REG | 0x8);
1876 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1878 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1881 if (bp->dev->mtu > 1500) {
1882 /* Set extended packet length bit */
1883 bnx2_write_phy(bp, 0x18, 0x7);
1884 bnx2_read_phy(bp, 0x18, &val);
1885 bnx2_write_phy(bp, 0x18, val | 0x4000);
1887 bnx2_read_phy(bp, 0x10, &val);
1888 bnx2_write_phy(bp, 0x10, val | 0x1);
1891 bnx2_write_phy(bp, 0x18, 0x7);
1892 bnx2_read_phy(bp, 0x18, &val);
1893 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1895 bnx2_read_phy(bp, 0x10, &val);
1896 bnx2_write_phy(bp, 0x10, val & ~0x1);
1899 /* ethernet@wirespeed */
1900 bnx2_write_phy(bp, 0x18, 0x7007);
1901 bnx2_read_phy(bp, 0x18, &val);
1902 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1908 bnx2_init_phy(struct bnx2 *bp)
1913 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1914 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1916 bp->mii_bmcr = MII_BMCR;
1917 bp->mii_bmsr = MII_BMSR;
1918 bp->mii_bmsr1 = MII_BMSR;
1919 bp->mii_adv = MII_ADVERTISE;
1920 bp->mii_lpa = MII_LPA;
1922 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1924 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1927 bnx2_read_phy(bp, MII_PHYSID1, &val);
1928 bp->phy_id = val << 16;
1929 bnx2_read_phy(bp, MII_PHYSID2, &val);
1930 bp->phy_id |= val & 0xffff;
1932 if (bp->phy_flags & PHY_SERDES_FLAG) {
1933 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1934 rc = bnx2_init_5706s_phy(bp);
1935 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1936 rc = bnx2_init_5708s_phy(bp);
1937 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1938 rc = bnx2_init_5709s_phy(bp);
1941 rc = bnx2_init_copper_phy(bp);
1946 rc = bnx2_setup_phy(bp, bp->phy_port);
1952 bnx2_set_mac_loopback(struct bnx2 *bp)
1956 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1957 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1958 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1959 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1964 static int bnx2_test_link(struct bnx2 *);
1967 bnx2_set_phy_loopback(struct bnx2 *bp)
1972 spin_lock_bh(&bp->phy_lock);
1973 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1975 spin_unlock_bh(&bp->phy_lock);
1979 for (i = 0; i < 10; i++) {
1980 if (bnx2_test_link(bp) == 0)
1985 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1986 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1987 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1988 BNX2_EMAC_MODE_25G_MODE);
1990 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1991 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
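/* Driver/firmware mailbox handshake: a command plus a rolling sequence
 * number is written to the DRV_MB mailbox, and the firmware acks by
 * echoing the sequence number back through FW_MB.
 */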
1997 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2003 msg_data |= bp->fw_wr_seq;
2005 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2007 /* wait for an acknowledgement. */
2008 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2011 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2013 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2016 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2019 /* If we timed out, inform the firmware that this is the case. */
2020 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2022 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2025 msg_data &= ~BNX2_DRV_MSG_CODE;
2026 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2028 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2033 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2040 bnx2_init_5709_context(struct bnx2 *bp)
2045 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2046 val |= (BCM_PAGE_BITS - 8) << 16;
2047 REG_WR(bp, BNX2_CTX_COMMAND, val);
2048 for (i = 0; i < 10; i++) {
2049 val = REG_RD(bp, BNX2_CTX_COMMAND);
2050 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2054 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2057 for (i = 0; i < bp->ctx_pages; i++) {
2060 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2061 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2062 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2063 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2064 (u64) bp->ctx_blk_mapping[i] >> 32);
2065 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2066 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2067 for (j = 0; j < 10; j++) {
2069 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2070 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2074 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2083 bnx2_init_context(struct bnx2 *bp)
2089 u32 vcid_addr, pcid_addr, offset;
2094 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2097 vcid_addr = GET_PCID_ADDR(vcid);
2099 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2104 pcid_addr = GET_PCID_ADDR(new_vcid);
2107 vcid_addr = GET_CID_ADDR(vcid);
2108 pcid_addr = vcid_addr;
2111 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2112 vcid_addr += (i << PHY_CTX_SHIFT);
2113 pcid_addr += (i << PHY_CTX_SHIFT);
2115 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2116 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2118 /* Zero out the context. */
2119 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2120 CTX_WR(bp, 0x00, offset, 0);
2122 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2123 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2129 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2135 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2136 if (good_mbuf == NULL) {
2137 printk(KERN_ERR PFX "Failed to allocate memory in "
2138 "bnx2_alloc_bad_rbuf\n");
2142 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2143 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2147 /* Allocate a bunch of mbufs and save the good ones in an array. */
2148 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2149 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2150 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2152 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2154 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2156 /* The addresses with Bit 9 set are bad memory blocks. */
2157 if (!(val & (1 << 9))) {
2158 good_mbuf[good_mbuf_cnt] = (u16) val;
2162 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2165 /* Free the good ones back to the mbuf pool thus discarding
2166 * all the bad ones. */
2167 while (good_mbuf_cnt) {
2170 val = good_mbuf[good_mbuf_cnt];
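		/* This appears to pack the mbuf value into the two fields
		 * of the free command expected by the firmware, plus a
		 * valid bit in bit 0.
		 */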
2171 val = (val << 9) | val | 1;
2173 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
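/* Program the unicast MAC address: the two high-order bytes go into
 * EMAC_MAC_MATCH0 and the remaining four into EMAC_MAC_MATCH1.
 */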
2180 bnx2_set_mac_addr(struct bnx2 *bp)
2183 u8 *mac_addr = bp->dev->dev_addr;
2185 val = (mac_addr[0] << 8) | mac_addr[1];
2187 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2189 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2190 (mac_addr[4] << 8) | mac_addr[5];
2192 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2196 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2198 struct sk_buff *skb;
2199 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2201 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2202 unsigned long align;
2204 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
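	/* Round skb->data up to the BNX2_RX_ALIGN boundary by reserving
	 * the misaligned remainder, if any.
	 */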
2209 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2210 skb_reserve(skb, BNX2_RX_ALIGN - align);
2212 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2213 PCI_DMA_FROMDEVICE);
2216 pci_unmap_addr_set(rx_buf, mapping, mapping);
2218 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2219 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2221 bp->rx_prod_bseq += bp->rx_buf_use_size;
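/* An attention event is pending when its bit in status_attn_bits
 * differs from the copy in status_attn_bits_ack; it is acknowledged by
 * writing the bit to the SET or CLEAR command register so the ack copy
 * matches the new state again.
 */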
2227 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2229 struct status_block *sblk = bp->status_blk;
2230 u32 new_link_state, old_link_state;
2233 new_link_state = sblk->status_attn_bits & event;
2234 old_link_state = sblk->status_attn_bits_ack & event;
2235 if (new_link_state != old_link_state) {
2237 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2239 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2247 bnx2_phy_int(struct bnx2 *bp)
2249 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2250 spin_lock(&bp->phy_lock);
2252 spin_unlock(&bp->phy_lock);
2254 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2255 bnx2_set_remote_link(bp);
2260 bnx2_tx_int(struct bnx2 *bp)
2262 struct status_block *sblk = bp->status_blk;
2263 u16 hw_cons, sw_cons, sw_ring_cons;
2266 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2267 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2270 sw_cons = bp->tx_cons;
2272 while (sw_cons != hw_cons) {
2273 struct sw_bd *tx_buf;
2274 struct sk_buff *skb;
2277 sw_ring_cons = TX_RING_IDX(sw_cons);
2279 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2282 /* partial BD completions possible with TSO packets */
2283 if (skb_is_gso(skb)) {
2284 u16 last_idx, last_ring_idx;
2286 last_idx = sw_cons +
2287 skb_shinfo(skb)->nr_frags + 1;
2288 last_ring_idx = sw_ring_cons +
2289 skb_shinfo(skb)->nr_frags + 1;
2290 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2293 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2298 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2299 skb_headlen(skb), PCI_DMA_TODEVICE);
2302 last = skb_shinfo(skb)->nr_frags;
2304 for (i = 0; i < last; i++) {
2305 sw_cons = NEXT_TX_BD(sw_cons);
2307 pci_unmap_page(bp->pdev,
2309 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2311 skb_shinfo(skb)->frags[i].size,
2315 sw_cons = NEXT_TX_BD(sw_cons);
2317 tx_free_bd += last + 1;
2321 hw_cons = bp->hw_tx_cons =
2322 sblk->status_tx_quick_consumer_index0;
2324 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2329 bp->tx_cons = sw_cons;
2330 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2331 * before checking for netif_queue_stopped(). Without the
2332 * memory barrier, there is a small possibility that bnx2_start_xmit()
2333 * will miss it and cause the queue to be stopped forever.
2337 if (unlikely(netif_queue_stopped(bp->dev)) &&
2338 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2339 netif_tx_lock(bp->dev);
2340 if ((netif_queue_stopped(bp->dev)) &&
2341 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2342 netif_wake_queue(bp->dev);
2343 netif_tx_unlock(bp->dev);
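/* Recycle an rx buffer in place: the skb, its DMA mapping and the
 * buffer-descriptor address are moved from the consumer slot to the
 * producer slot.  Used when the packet was copied out or a replacement
 * skb could not be allocated.
 */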
2348 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2351 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2352 struct rx_bd *cons_bd, *prod_bd;
2354 cons_rx_buf = &bp->rx_buf_ring[cons];
2355 prod_rx_buf = &bp->rx_buf_ring[prod];
2357 pci_dma_sync_single_for_device(bp->pdev,
2358 pci_unmap_addr(cons_rx_buf, mapping),
2359 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2361 bp->rx_prod_bseq += bp->rx_buf_use_size;
2363 prod_rx_buf->skb = skb;
2368 pci_unmap_addr_set(prod_rx_buf, mapping,
2369 pci_unmap_addr(cons_rx_buf, mapping));
2371 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2372 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2373 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2374 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2378 bnx2_rx_int(struct bnx2 *bp, int budget)
2380 struct status_block *sblk = bp->status_blk;
2381 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2382 struct l2_fhdr *rx_hdr;
2385 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2386 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2389 sw_cons = bp->rx_cons;
2390 sw_prod = bp->rx_prod;
2392 /* Memory barrier necessary as speculative reads of the rx
2393 * buffer can be ahead of the index in the status block
2396 while (sw_cons != hw_cons) {
2399 struct sw_bd *rx_buf;
2400 struct sk_buff *skb;
2401 dma_addr_t dma_addr;
2403 sw_ring_cons = RX_RING_IDX(sw_cons);
2404 sw_ring_prod = RX_RING_IDX(sw_prod);
2406 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2411 dma_addr = pci_unmap_addr(rx_buf, mapping);
2413 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2414 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2416 rx_hdr = (struct l2_fhdr *) skb->data;
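		/* l2_fhdr_pkt_len includes the 4-byte frame CRC; strip it
		 * from the length handed up to the stack.
		 */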
2417 len = rx_hdr->l2_fhdr_pkt_len - 4;
2419 if ((status = rx_hdr->l2_fhdr_status) &
2420 (L2_FHDR_ERRORS_BAD_CRC |
2421 L2_FHDR_ERRORS_PHY_DECODE |
2422 L2_FHDR_ERRORS_ALIGNMENT |
2423 L2_FHDR_ERRORS_TOO_SHORT |
2424 L2_FHDR_ERRORS_GIANT_FRAME)) {
2429 /* Since we don't have a jumbo ring, copy small packets
2432 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2433 struct sk_buff *new_skb;
2435 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2436 if (new_skb == NULL)
2440 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2441 new_skb->data, len + 2);
2442 skb_reserve(new_skb, 2);
2443 skb_put(new_skb, len);
2445 bnx2_reuse_rx_skb(bp, skb,
        sw_ring_cons, sw_ring_prod);

    skb = new_skb;
}
2450 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2451 pci_unmap_single(bp->pdev, dma_addr,
2452 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
    skb_reserve(skb, bp->rx_offset);
    skb_put(skb, len);
}
else {
reuse_rx:
    bnx2_reuse_rx_skb(bp, skb,
        sw_ring_cons, sw_ring_prod);
    goto next_rx;
}
2464 skb->protocol = eth_type_trans(skb, bp->dev);
if ((len > (bp->dev->mtu + ETH_HLEN)) &&
    (ntohs(skb->protocol) != 0x8100)) {

    dev_kfree_skb(skb);
    goto next_rx;

}
2474 skb->ip_summed = CHECKSUM_NONE;
if (bp->rx_csum &&
    (status & (L2_FHDR_STATUS_TCP_SEGMENT |
    L2_FHDR_STATUS_UDP_DATAGRAM))) {
2479 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2480 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
        skb->ip_summed = CHECKSUM_UNNECESSARY;
}
2485 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
    vlan_hwaccel_receive_skb(skb, bp->vlgrp,
        rx_hdr->l2_fhdr_vlan_tag);
}
else
2491 netif_receive_skb(skb);
bp->dev->last_rx = jiffies;
rx_pkt++;

next_rx:
2497 sw_cons = NEXT_RX_BD(sw_cons);
2498 sw_prod = NEXT_RX_BD(sw_prod);
if (rx_pkt == budget)
    break;
2503 /* Refresh hw_cons to see if there is new work */
2504 if (sw_cons == hw_cons) {
2505 hw_cons = bp->hw_rx_cons =
2506 sblk->status_rx_quick_consumer_index0;
        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
            hw_cons++;
        rmb();
    }
}
2512 bp->rx_cons = sw_cons;
2513 bp->rx_prod = sw_prod;
2515 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

mmiowb();

return rx_pkt;
}
2525 /* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
2529 bnx2_msi(int irq, void *dev_instance)
2531 struct net_device *dev = dev_instance;
2532 struct bnx2 *bp = netdev_priv(dev);
2534 prefetch(bp->status_blk);
2535 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2536 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2537 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2539 /* Return here if interrupt is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
    return IRQ_HANDLED;

netif_rx_schedule(dev);

return IRQ_HANDLED;
}
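/* One-shot MSI ISR (5709): the hardware masks further interrupts by
 * itself until the host re-arms them, so no explicit mask write is
 * needed here.
 */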
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
2551 struct net_device *dev = dev_instance;
2552 struct bnx2 *bp = netdev_priv(dev);
2554 prefetch(bp->status_blk);
2556 /* Return here if interrupt is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
    return IRQ_HANDLED;

netif_rx_schedule(dev);

return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
2568 struct net_device *dev = dev_instance;
2569 struct bnx2 *bp = netdev_priv(dev);
2570 struct status_block *sblk = bp->status_blk;
2572 /* When using INTx, it is possible for the interrupt to arrive
2573 * at the CPU before the status block posted prior to the
2574 * interrupt. Reading a register will flush the status block.
2575 * When using MSI, the MSI message will always complete after
 * the status block write.
 */
2578 if ((sblk->status_idx == bp->last_status_idx) &&
2579 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
    return IRQ_NONE;
2583 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2584 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2585 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2587 /* Read back to deassert IRQ immediately to avoid too many
 * spurious interrupts.
 */
2590 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2592 /* Return here if interrupt is shared and is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
    return IRQ_HANDLED;
2596 if (netif_rx_schedule_prep(dev)) {
2597 bp->last_status_idx = sblk->status_idx;
    __netif_rx_schedule(dev);
}

return IRQ_HANDLED;
}
2604 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2605 STATUS_ATTN_BITS_TIMER_ABORT)
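/* Check the status block for pending tx/rx completions or
 * unacknowledged attention events (link changes, timer aborts).
 */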
static inline int
bnx2_has_work(struct bnx2 *bp)
{
2610 struct status_block *sblk = bp->status_blk;
2612 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
    return 1;
2616 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
    return 1;

return 0;
}
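/* NAPI poll handler: service attention events first, then tx and rx
 * completions, and re-enable interrupts only when no work remains.
 */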
static int
bnx2_poll(struct net_device *dev, int *budget)
{
2626 struct bnx2 *bp = netdev_priv(dev);
2627 struct status_block *sblk = bp->status_blk;
2628 u32 status_attn_bits = sblk->status_attn_bits;
2629 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2631 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

    bnx2_phy_int(bp);

    /* This is needed to take care of transient status
     * during link changes.
     */
    REG_WR(bp, BNX2_HC_COMMAND,
           bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
    REG_RD(bp, BNX2_HC_COMMAND);
}
if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
    bnx2_tx_int(bp);
2647 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
int orig_budget = *budget;
int work_done;
2651 if (orig_budget > dev->quota)
2652 orig_budget = dev->quota;
2654 work_done = bnx2_rx_int(bp, orig_budget);
2655 *budget -= work_done;
    dev->quota -= work_done;
}

bp->last_status_idx = bp->status_blk->status_idx;
rmb();
2662 if (!bnx2_has_work(bp)) {
2663 netif_rx_complete(dev);
2664 if (likely(bp->flags & USING_MSI_FLAG)) {
2665 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2666 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
           bp->last_status_idx);
    return 0;
}
2670 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2671 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2672 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2673 bp->last_status_idx);
2675 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2676 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
           bp->last_status_idx);
    return 0;
}

return 1;
}
2684 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
2688 bnx2_set_rx_mode(struct net_device *dev)
2690 struct bnx2 *bp = netdev_priv(dev);
2691 u32 rx_mode, sort_mode;
2694 spin_lock_bh(&bp->phy_lock);
2696 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2697 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2698 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
    rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
if (!(bp->flags & ASF_ENABLE_FLAG))
    rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
2706 if (dev->flags & IFF_PROMISC) {
2707 /* Promiscuous mode. */
2708 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2709 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
        BNX2_RPM_SORT_USER0_PROM_VLAN;
}
2712 else if (dev->flags & IFF_ALLMULTI) {
2713 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
    REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
           0xffffffff);
}
    sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
}
else {
    /* Accept one or more multicast(s). */
2721 struct dev_mc_list *mclist;
u32 mc_filter[NUM_MC_HASH_REGISTERS];
u32 regidx, bit, crc;
2727 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2729 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2730 i++, mclist = mclist->next) {
    crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
    bit = crc & 0xff;
    regidx = (bit & 0xe0) >> 5;
    bit &= 0x1f;
    mc_filter[regidx] |= (1 << bit);
}
2739 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
    REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
           mc_filter[i]);
}
    sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
}
2747 if (rx_mode != bp->rx_mode) {
2748 bp->rx_mode = rx_mode;
2749 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2752 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2753 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2754 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
spin_unlock_bh(&bp->phy_lock);
}
2759 #define FW_BUF_SIZE 0x8000
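/* The on-chip CPU firmware images are stored gzip-compressed in the
 * driver; the helpers below set up a zlib stream and a scratch buffer
 * for inflating them before download.
 */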
static int
bnx2_gunzip_init(struct bnx2 *bp)
{
2764 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2767 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2770 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2771 if (bp->strm->workspace == NULL)
2781 vfree(bp->gunzip_buf);
2782 bp->gunzip_buf = NULL;
2785 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2786 "uncompression.\n", bp->dev->name);
2791 bnx2_gunzip_end(struct bnx2 *bp)
2793 kfree(bp->strm->workspace);
2798 if (bp->gunzip_buf) {
2799 vfree(bp->gunzip_buf);
2800 bp->gunzip_buf = NULL;
2805 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2809 /* check gzip header */
if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
    return -EINVAL;

n = 10;

#define FNAME	0x8
if (zbuf[3] & FNAME)
    while ((zbuf[n++] != 0) && (n < len));
2817 while ((zbuf[n++] != 0) && (n < len));
2819 bp->strm->next_in = zbuf + n;
2820 bp->strm->avail_in = len - n;
2821 bp->strm->next_out = bp->gunzip_buf;
2822 bp->strm->avail_out = FW_BUF_SIZE;
rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
if (rc != Z_OK)
    return rc;
2828 rc = zlib_inflate(bp->strm, Z_FINISH);
2830 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2831 *outbuf = bp->gunzip_buf;
2833 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2834 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2835 bp->dev->name, bp->strm->msg);
2837 zlib_inflateEnd(bp->strm);
if (rc == Z_STREAM_END)
    return 0;

return rc;
}
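/* Download an RV2P processor firmware image.  Each 8-byte instruction
 * is written through the INSTR_HIGH/INSTR_LOW registers and committed
 * with a write to the corresponding ADDR_CMD register.
 */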
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
    u32 rv2p_proc)
{
    int i;
    u32 val;
2853 for (i = 0; i < rv2p_code_len; i += 8) {
REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
rv2p_code++;
REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
rv2p_code++;
2859 if (rv2p_proc == RV2P_PROC1) {
2860 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2861 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
}
else {
    val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
    REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
}
}
2869 /* Reset the processor, un-stall is done later. */
2870 if (rv2p_proc == RV2P_PROC1) {
2871 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
}
else {
    REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
}
}
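/* Download firmware to one of the on-chip RISC CPUs: halt the CPU,
 * copy the text/data/sbss/bss/rodata sections into its scratchpad,
 * point the program counter at the entry address, then clear the halt
 * bit to start it.
 */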
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
    u32 offset;
    u32 val;
    int rc;

    /* Halt the CPU. */
2886 val = REG_RD_IND(bp, cpu_reg->mode);
2887 val |= cpu_reg->mode_value_halt;
2888 REG_WR_IND(bp, cpu_reg->mode, val);
2889 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2891 /* Load the Text area. */
2892 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2897 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2907 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2908 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2912 /* Load the Data area. */
2913 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2917 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2918 REG_WR_IND(bp, offset, fw->data[j]);
2922 /* Load the SBSS area. */
2923 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2927 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2928 REG_WR_IND(bp, offset, fw->sbss[j]);
2932 /* Load the BSS area. */
2933 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2937 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2938 REG_WR_IND(bp, offset, fw->bss[j]);
2942 /* Load the Read-Only area. */
2943 offset = cpu_reg->spad_base +
2944 (fw->rodata_addr - cpu_reg->mips_view_base);
2948 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2949 REG_WR_IND(bp, offset, fw->rodata[j]);
2953 /* Clear the pre-fetch instruction. */
2954 REG_WR_IND(bp, cpu_reg->inst, 0);
2955 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2957 /* Start the CPU. */
2958 val = REG_RD_IND(bp, cpu_reg->mode);
2959 val &= ~cpu_reg->mode_value_halt;
2960 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
REG_WR_IND(bp, cpu_reg->mode, val);

return 0;
}
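/* Load firmware into both RV2P processors and the on-chip CPUs
 * (RX, TX, TX patch-up, completion, and command processors),
 * selecting the 5709 or 5706/5708 images as appropriate.
 */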
static int
bnx2_init_cpus(struct bnx2 *bp)
{
    struct cpu_reg cpu_reg;
    struct fw_info *fw;
    int rc = 0;
    u32 text_len;
    void *text;
if ((rc = bnx2_gunzip_init(bp)) != 0)
    return rc;
2978 /* Initialize the RV2P processor. */
rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
                 &text_len);
if (rc)
    goto init_cpu_err;

load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
                 &text_len);
if (rc)
    goto init_cpu_err;

load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2993 /* Initialize the RX Processor. */
2994 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2995 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2996 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2997 cpu_reg.state = BNX2_RXP_CPU_STATE;
2998 cpu_reg.state_value_clear = 0xffffff;
2999 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3000 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3001 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3002 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3003 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3004 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3005 cpu_reg.mips_view_base = 0x8000000;
3007 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3008 fw = &bnx2_rxp_fw_09;
else
    fw = &bnx2_rxp_fw_06;

rc = load_cpu_fw(bp, &cpu_reg, fw);
if (rc)
    goto init_cpu_err;
3016 /* Initialize the TX Processor. */
3017 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3018 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3019 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3020 cpu_reg.state = BNX2_TXP_CPU_STATE;
3021 cpu_reg.state_value_clear = 0xffffff;
3022 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3023 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3024 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3025 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3026 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3027 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3028 cpu_reg.mips_view_base = 0x8000000;
3030 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3031 fw = &bnx2_txp_fw_09;
else
    fw = &bnx2_txp_fw_06;

rc = load_cpu_fw(bp, &cpu_reg, fw);
if (rc)
    goto init_cpu_err;
3039 /* Initialize the TX Patch-up Processor. */
3040 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3041 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3042 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3043 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3044 cpu_reg.state_value_clear = 0xffffff;
3045 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3046 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3047 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3048 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3049 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3050 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3051 cpu_reg.mips_view_base = 0x8000000;
3053 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3054 fw = &bnx2_tpat_fw_09;
else
    fw = &bnx2_tpat_fw_06;

rc = load_cpu_fw(bp, &cpu_reg, fw);
if (rc)
    goto init_cpu_err;
3062 /* Initialize the Completion Processor. */
3063 cpu_reg.mode = BNX2_COM_CPU_MODE;
3064 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3065 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3066 cpu_reg.state = BNX2_COM_CPU_STATE;
3067 cpu_reg.state_value_clear = 0xffffff;
3068 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3069 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3070 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3071 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3072 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3073 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3074 cpu_reg.mips_view_base = 0x8000000;
3076 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3077 fw = &bnx2_com_fw_09;
else
    fw = &bnx2_com_fw_06;

rc = load_cpu_fw(bp, &cpu_reg, fw);
if (rc)
    goto init_cpu_err;
3085 /* Initialize the Command Processor. */
3086 cpu_reg.mode = BNX2_CP_CPU_MODE;
3087 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3088 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3089 cpu_reg.state = BNX2_CP_CPU_STATE;
3090 cpu_reg.state_value_clear = 0xffffff;
3091 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3092 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3093 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3094 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3095 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3096 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3097 cpu_reg.mips_view_base = 0x8000000;
3099 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3100 fw = &bnx2_cp_fw_09;
    rc = load_cpu_fw(bp, &cpu_reg, fw);
    if (rc)
        goto init_cpu_err;
}

init_cpu_err:
    bnx2_gunzip_end(bp);
    return rc;
}
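/* Move the chip between D0 and D3hot.  On the way down the MAC is
 * reconfigured to receive magic/ACPI wake-up packets and the firmware
 * is told whether wake-on-LAN is enabled.
 */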
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
    u16 pmcsr;
3116 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3122 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3123 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3124 PCI_PM_CTRL_PME_STATUS);
3126 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
/* delay required during transition out of D3hot */
msleep(20);
3130 val = REG_RD(bp, BNX2_EMAC_MODE);
3131 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3132 val &= ~BNX2_EMAC_MODE_MPKT;
3133 REG_WR(bp, BNX2_EMAC_MODE, val);
3135 val = REG_RD(bp, BNX2_RPM_CONFIG);
3136 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3137 REG_WR(bp, BNX2_RPM_CONFIG, val);
3148 autoneg = bp->autoneg;
3149 advertising = bp->advertising;
3151 bp->autoneg = AUTONEG_SPEED;
3152 bp->advertising = ADVERTISED_10baseT_Half |
3153 ADVERTISED_10baseT_Full |
3154 ADVERTISED_100baseT_Half |
    ADVERTISED_100baseT_Full |
    ADVERTISED_1000baseT_Full |
    ADVERTISED_Autoneg;
3158 bnx2_setup_copper_phy(bp);
3160 bp->autoneg = autoneg;
3161 bp->advertising = advertising;
3163 bnx2_set_mac_addr(bp);
3165 val = REG_RD(bp, BNX2_EMAC_MODE);
3167 /* Enable port mode. */
3168 val &= ~BNX2_EMAC_MODE_PORT;
3169 val |= BNX2_EMAC_MODE_PORT_MII |
3170 BNX2_EMAC_MODE_MPKT_RCVD |
3171 BNX2_EMAC_MODE_ACPI_RCVD |
3172 BNX2_EMAC_MODE_MPKT;
3174 REG_WR(bp, BNX2_EMAC_MODE, val);
3176 /* receive all multicast */
3177 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
    REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
           0xffffffff);
}
3181 REG_WR(bp, BNX2_EMAC_RX_MODE,
3182 BNX2_EMAC_RX_MODE_SORT_MODE);
3184 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3185 BNX2_RPM_SORT_USER0_MC_EN;
3186 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3187 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3188 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3189 BNX2_RPM_SORT_USER0_ENA);
3191 /* Need to enable EMAC and RPM for WOL. */
3192 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3193 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3194 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3195 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3197 val = REG_RD(bp, BNX2_RPM_CONFIG);
3198 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3199 REG_WR(bp, BNX2_RPM_CONFIG, val);
3201 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
}
else {
    wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
}
3207 if (!(bp->flags & NO_WOL_FLAG))
3208 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3210 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3211 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3212 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3221 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                      pmcsr);
3226 /* No more memory access after this point until
 * device is brought back to D0.
 */
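/* NVRAM access is arbitrated between the driver and the management
 * firmware through the NVM_SW_ARB register; the helpers below request
 * and release arbitration slot 2 on behalf of the driver.
 */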
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
    u32 val;
    int j;
3244 /* Request access to the flash interface. */
3245 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3246 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3247 val = REG_RD(bp, BNX2_NVM_SW_ARB);
    if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
        break;

    udelay(5);
}

if (j >= NVRAM_TIMEOUT_COUNT)
    return -EBUSY;

return 0;
}
3261 bnx2_release_nvram_lock(struct bnx2 *bp)
3266 /* Relinquish nvram interface. */
3267 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3269 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3270 val = REG_RD(bp, BNX2_NVM_SW_ARB);
    if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
        break;

    udelay(5);
}

if (j >= NVRAM_TIMEOUT_COUNT)
    return -EBUSY;

return 0;
}
3285 bnx2_enable_nvram_write(struct bnx2 *bp)
3289 val = REG_RD(bp, BNX2_MISC_CFG);
3290 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3292 if (!bp->flash_info->buffered) {
3295 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3296 REG_WR(bp, BNX2_NVM_COMMAND,
3297 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3299 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3302 val = REG_RD(bp, BNX2_NVM_COMMAND);
3303 if (val & BNX2_NVM_COMMAND_DONE)
3307 if (j >= NVRAM_TIMEOUT_COUNT)
3314 bnx2_disable_nvram_write(struct bnx2 *bp)
3318 val = REG_RD(bp, BNX2_MISC_CFG);
3319 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3324 bnx2_enable_nvram_access(struct bnx2 *bp)
3328 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3329 /* Enable both bits, even on read. */
3330 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3331 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3335 bnx2_disable_nvram_access(struct bnx2 *bp)
3339 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3340 /* Disable both bits, even after read. */
3341 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3342 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3343 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3347 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3352 if (bp->flash_info->buffered)
3353 /* Buffered flash, no erase needed */
3356 /* Build an erase command */
3357 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3358 BNX2_NVM_COMMAND_DOIT;
3360 /* Need to clear DONE bit separately. */
3361 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
/* Address of the NVRAM page to erase. */
3364 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3366 /* Issue an erase command. */
3367 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3369 /* Wait for completion. */
3370 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3375 val = REG_RD(bp, BNX2_NVM_COMMAND);
3376 if (val & BNX2_NVM_COMMAND_DONE)
3380 if (j >= NVRAM_TIMEOUT_COUNT)
3387 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3392 /* Build the command word. */
3393 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3395 /* Calculate an offset of a buffered flash. */
3396 if (bp->flash_info->buffered) {
3397 offset = ((offset / bp->flash_info->page_size) <<
3398 bp->flash_info->page_bits) +
3399 (offset % bp->flash_info->page_size);
3402 /* Need to clear DONE bit separately. */
3403 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3405 /* Address of the NVRAM to read from. */
3406 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3408 /* Issue a read command. */
3409 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3411 /* Wait for completion. */
3412 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3417 val = REG_RD(bp, BNX2_NVM_COMMAND);
3418 if (val & BNX2_NVM_COMMAND_DONE) {
3419 val = REG_RD(bp, BNX2_NVM_READ);
3421 val = be32_to_cpu(val);
3422 memcpy(ret_val, &val, 4);
3426 if (j >= NVRAM_TIMEOUT_COUNT)
3434 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3439 /* Build the command word. */
3440 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3442 /* Calculate an offset of a buffered flash. */
3443 if (bp->flash_info->buffered) {
3444 offset = ((offset / bp->flash_info->page_size) <<
3445 bp->flash_info->page_bits) +
3446 (offset % bp->flash_info->page_size);
3449 /* Need to clear DONE bit separately. */
3450 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3452 memcpy(&val32, val, 4);
3453 val32 = cpu_to_be32(val32);
3455 /* Write the data. */
3456 REG_WR(bp, BNX2_NVM_WRITE, val32);
3458 /* Address of the NVRAM to write to. */
3459 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3461 /* Issue the write command. */
3462 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3464 /* Wait for completion. */
3465 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3468 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3471 if (j >= NVRAM_TIMEOUT_COUNT)
3478 bnx2_init_nvram(struct bnx2 *bp)
3481 int j, entry_count, rc;
3482 struct flash_spec *flash;
3484 /* Determine the selected interface. */
3485 val = REG_RD(bp, BNX2_NVM_CFG1);
3487 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3490 if (val & 0x40000000) {
3492 /* Flash interface has been reconfigured */
3493 for (j = 0, flash = &flash_table[0]; j < entry_count;
3495 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3496 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3497 bp->flash_info = flash;
/* Not yet reconfigured */
3506 if (val & (1 << 23))
3507 mask = FLASH_BACKUP_STRAP_MASK;
else
    mask = FLASH_STRAP_MASK;
3511 for (j = 0, flash = &flash_table[0]; j < entry_count;
3514 if ((val & mask) == (flash->strapping & mask)) {
3515 bp->flash_info = flash;
3517 /* Request access to the flash interface. */
3518 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3521 /* Enable access to flash interface */
3522 bnx2_enable_nvram_access(bp);
3524 /* Reconfigure the flash interface */
3525 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3526 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3527 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3528 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3530 /* Disable access to flash interface */
3531 bnx2_disable_nvram_access(bp);
3532 bnx2_release_nvram_lock(bp);
3537 } /* if (val & 0x40000000) */
3539 if (j == entry_count) {
3540 bp->flash_info = NULL;
3541 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3545 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3546 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3548 bp->flash_size = val;
    bp->flash_size = bp->flash_info->total_size;

return rc;
}
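/* Read an arbitrary byte range from NVRAM.  The flash interface only
 * transfers aligned dwords, so the unaligned head and tail of the
 * request are bounced through a small local buffer.
 */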
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
    int buf_size)
{
    int rc = 0;
3560 u32 cmd_flags, offset32, len32, extra;
3565 /* Request access to the flash interface. */
3566 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3569 /* Enable access to flash interface */
3570 bnx2_enable_nvram_access(bp);
3583 pre_len = 4 - (offset & 3);
3585 if (pre_len >= len32) {
3587 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3588 BNX2_NVM_COMMAND_LAST;
3591 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3594 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3599 memcpy(ret_buf, buf + (offset & 3), pre_len);
3606 extra = 4 - (len32 & 3);
3607 len32 = (len32 + 4) & ~3;
3614 cmd_flags = BNX2_NVM_COMMAND_LAST;
3616 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3617 BNX2_NVM_COMMAND_LAST;
3619 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3621 memcpy(ret_buf, buf, 4 - extra);
3623 else if (len32 > 0) {
3626 /* Read the first word. */
3630 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3632 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
/* Advance to the next dword. */
offset32 += 4;
ret_buf += 4;
len32 -= 4;
3639 while (len32 > 4 && rc == 0) {
3640 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3642 /* Advance to the next dword. */
3651 cmd_flags = BNX2_NVM_COMMAND_LAST;
3652 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3654 memcpy(ret_buf, buf, 4 - extra);
3657 /* Disable access to flash interface */
3658 bnx2_disable_nvram_access(bp);
bnx2_release_nvram_lock(bp);

return rc;
}
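/* Write an arbitrary byte range to NVRAM.  Unaligned edges are
 * read-modify-written, and for non-buffered flash each affected page
 * is saved, erased, and rewritten one dword at a time.
 */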
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
    int buf_size)
{
    u32 written, offset32, len32;
3670 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3672 int align_start, align_end;
3677 align_start = align_end = 0;
3679 if ((align_start = (offset32 & 3))) {
3681 len32 += align_start;
3684 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3689 align_end = 4 - (len32 & 3);
3691 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3695 if (align_start || align_end) {
3696 align_buf = kmalloc(len32, GFP_KERNEL);
if (align_buf == NULL)
    return -ENOMEM;
3700 memcpy(align_buf, start, 4);
3703 memcpy(align_buf + len32 - 4, end, 4);
3705 memcpy(align_buf + align_start, data_buf, buf_size);
3709 if (bp->flash_info->buffered == 0) {
3710 flash_buffer = kmalloc(264, GFP_KERNEL);
3711 if (flash_buffer == NULL) {
3713 goto nvram_write_end;
3718 while ((written < len32) && (rc == 0)) {
3719 u32 page_start, page_end, data_start, data_end;
3720 u32 addr, cmd_flags;
3723 /* Find the page_start addr */
3724 page_start = offset32 + written;
3725 page_start -= (page_start % bp->flash_info->page_size);
3726 /* Find the page_end addr */
3727 page_end = page_start + bp->flash_info->page_size;
3728 /* Find the data_start addr */
3729 data_start = (written == 0) ? offset32 : page_start;
3730 /* Find the data_end addr */
3731 data_end = (page_end > offset32 + len32) ?
3732 (offset32 + len32) : page_end;
3734 /* Request access to the flash interface. */
3735 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3736 goto nvram_write_end;
3738 /* Enable access to flash interface */
3739 bnx2_enable_nvram_access(bp);
3741 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3742 if (bp->flash_info->buffered == 0) {
3745 /* Read the whole page into the buffer
3746 * (non-buffer flash only) */
3747 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3748 if (j == (bp->flash_info->page_size - 4)) {
3749 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3751 rc = bnx2_nvram_read_dword(bp,
3757 goto nvram_write_end;
3763 /* Enable writes to flash interface (unlock write-protect) */
3764 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3765 goto nvram_write_end;
/* Loop to write back the buffer data from page_start to
 * data_start */
i = 0;
3770 if (bp->flash_info->buffered == 0) {
3771 /* Erase the page */
3772 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3773 goto nvram_write_end;
3775 /* Re-enable the write again for the actual write */
3776 bnx2_enable_nvram_write(bp);
3778 for (addr = page_start; addr < data_start;
3779 addr += 4, i += 4) {
3781 rc = bnx2_nvram_write_dword(bp, addr,
3782 &flash_buffer[i], cmd_flags);
3785 goto nvram_write_end;
3791 /* Loop to write the new data from data_start to data_end */
3792 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3793 if ((addr == page_end - 4) ||
3794 ((bp->flash_info->buffered) &&
3795 (addr == data_end - 4))) {
3797 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3799 rc = bnx2_nvram_write_dword(bp, addr, buf,
3803 goto nvram_write_end;
/* Loop to write back the buffer data from data_end
 * to page_end */
if (bp->flash_info->buffered == 0) {
    for (addr = data_end; addr < page_end;
        addr += 4, i += 4) {
3812 for (addr = data_end; addr < page_end;
3813 addr += 4, i += 4) {
3815 if (addr == page_end-4) {
3816 cmd_flags = BNX2_NVM_COMMAND_LAST;
3818 rc = bnx2_nvram_write_dword(bp, addr,
3819 &flash_buffer[i], cmd_flags);
3822 goto nvram_write_end;
3828 /* Disable writes to flash interface (lock write-protect) */
3829 bnx2_disable_nvram_write(bp);
3831 /* Disable access to flash interface */
3832 bnx2_disable_nvram_access(bp);
3833 bnx2_release_nvram_lock(bp);
3835 /* Increment written */
    written += data_end - data_start;
}

nvram_write_end:
kfree(flash_buffer);
kfree(align_buf);
return rc;
}
3846 bnx2_init_remote_phy(struct bnx2 *bp)
3850 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
if (!(bp->phy_flags & PHY_SERDES_FLAG))
    return;
3854 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
    return;
3858 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3859 if (netif_running(bp->dev)) {
3860 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3861 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
                   val);
    }
3865 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3867 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3868 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3869 bp->phy_port = PORT_FIBRE;
    else
        bp->phy_port = PORT_TP;
}
}
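/* Soft-reset the chip core.  The bootcode is notified before and
 * after the reset, and a driver signature is left in shared memory so
 * the firmware can tell a driver-initiated reset from a power-on
 * reset.
 */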
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
    u32 val;
    int i, rc = 0;
3881 /* Wait for the current PCI transaction to complete before
3882 * issuing a reset. */
3883 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3884 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3885 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3886 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3887 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3888 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3891 /* Wait for the firmware to tell us it is ok to issue a reset. */
3892 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3894 /* Deposit a driver reset signature so the firmware knows that
3895 * this is a soft reset. */
3896 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3897 BNX2_DRV_RESET_SIGNATURE_MAGIC);
/* Do a dummy read to force the chip to complete all current
 * transactions before we issue a reset. */
3901 val = REG_RD(bp, BNX2_MISC_ID);
3903 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3904 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3905 REG_RD(bp, BNX2_MISC_COMMAND);
3908 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3909 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3911 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
} else {
    val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3915 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3916 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3919 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3921 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3922 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3923 current->state = TASK_UNINTERRUPTIBLE;
    schedule_timeout(HZ / 50);
}
/* Reset takes approximately 30 usec */
3928 for (i = 0; i < 10; i++) {
3929 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3930 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
        BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
        break;
    udelay(10);
}
3936 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3937 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3938 printk(KERN_ERR PFX "Chip reset did not complete\n");
3943 /* Make sure byte swapping is properly configured. */
3944 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3945 if (val != 0x01020304) {
3946 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3950 /* Wait for the firmware to finish its initialization. */
rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
if (rc)
    return rc;
3955 spin_lock_bh(&bp->phy_lock);
3956 bnx2_init_remote_phy(bp);
3957 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3958 bnx2_set_default_remote_link(bp);
3959 spin_unlock_bh(&bp->phy_lock);
3961 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
/* Adjust the voltage regulator to two steps lower.  The default
 * of this register is 0x0000000e. */
3964 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3966 /* Remove bad rbuf memory from the free pool. */
    rc = bnx2_alloc_bad_rbuf(bp);
}

return rc;
}
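/* Bring the chip from reset to an operational state: program the DMA
 * byte-swapping mode, load the internal firmware, configure the host
 * coalescing block, and hand the status/statistics block addresses to
 * the hardware.
 */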
static int
bnx2_init_chip(struct bnx2 *bp)
{
    u32 val;
    int rc;
3979 /* Make sure the interrupt is not active. */
3980 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3982 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3983 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
3987 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3988 DMA_READ_CHANS << 12 |
3989 DMA_WRITE_CHANS << 16;
3991 val |= (0x2 << 20) | (1 << 11);
if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
    val |= (1 << 23);
3996 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3997 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3998 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4000 REG_WR(bp, BNX2_DMA_CONFIG, val);
4002 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4003 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4004 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4005 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4008 if (bp->flags & PCIX_FLAG) {
4011 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4013 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4014 val16 & ~PCI_X_CMD_ERO);
4017 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4018 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4019 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4020 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4022 /* Initialize context mapping and zero out the quick contexts. The
4023 * context block must have already been enabled. */
4024 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
    rc = bnx2_init_5709_context(bp);
    if (rc)
        return rc;
} else
    bnx2_init_context(bp);
if ((rc = bnx2_init_cpus(bp)) != 0)
    return rc;
4034 bnx2_init_nvram(bp);
4036 bnx2_set_mac_addr(bp);
4038 val = REG_RD(bp, BNX2_MQ_CONFIG);
4039 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4040 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4041 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4042 val |= BNX2_MQ_CONFIG_HALT_DIS;
4044 REG_WR(bp, BNX2_MQ_CONFIG, val);
4046 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4047 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4048 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4050 val = (BCM_PAGE_BITS - 8) << 24;
4051 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4053 /* Configure page size. */
4054 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4055 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4056 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4057 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4059 val = bp->mac_addr[0] +
4060 (bp->mac_addr[1] << 8) +
4061 (bp->mac_addr[2] << 16) +
    bp->mac_addr[3] +
    (bp->mac_addr[4] << 8) +
4064 (bp->mac_addr[5] << 16);
4065 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4067 /* Program the MTU. Also include 4 bytes for CRC32. */
4068 val = bp->dev->mtu + ETH_HLEN + 4;
4069 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4070 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4071 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4073 bp->last_status_idx = 0;
4074 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4076 /* Set up how to generate a link change interrupt. */
4077 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4079 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4080 (u64) bp->status_blk_mapping & 0xffffffff);
4081 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4083 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4084 (u64) bp->stats_blk_mapping & 0xffffffff);
4085 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4086 (u64) bp->stats_blk_mapping >> 32);
4088 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4089 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4091 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4092 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4094 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4095 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4097 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4099 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4101 REG_WR(bp, BNX2_HC_COM_TICKS,
4102 (bp->com_ticks_int << 16) | bp->com_ticks);
4104 REG_WR(bp, BNX2_HC_CMD_TICKS,
4105 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4107 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4108 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
else
    REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
4111 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4113 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4114 val = BNX2_HC_CONFIG_COLLECT_STATS;
else
    val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4117 BNX2_HC_CONFIG_COLLECT_STATS;
4120 if (bp->flags & ONE_SHOT_MSI_FLAG)
4121 val |= BNX2_HC_CONFIG_ONE_SHOT;
4123 REG_WR(bp, BNX2_HC_CONFIG, val);
4125 /* Clear internal stats counters. */
4126 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4128 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4130 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
4131 BNX2_PORT_FEATURE_ASF_ENABLED)
4132 bp->flags |= ASF_ENABLE_FLAG;
4134 /* Initialize the receive filter. */
4135 bnx2_set_rx_mode(bp->dev);
4137 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4138 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4139 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4140 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                  0);
4145 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
udelay(20);

bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

return rc;
}
4156 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4158 u32 val, offset0, offset1, offset2, offset3;
4160 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4161 offset0 = BNX2_L2CTX_TYPE_XI;
4162 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4163 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4164 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
} else {
    offset0 = BNX2_L2CTX_TYPE;
    offset1 = BNX2_L2CTX_CMD_TYPE;
    offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
    offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
}
val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4172 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4174 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4175 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4177 val = (u64) bp->tx_desc_mapping >> 32;
4178 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4180 val = (u64) bp->tx_desc_mapping & 0xffffffff;
CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
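/* The tx ring is a single page of buffer descriptors whose last entry
 * points back to the first; the hardware follows the chain from the
 * base address programmed into the tx context above.
 */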
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
    struct tx_bd *txbd;
    u32 cid;
4190 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4192 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4194 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4195 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
bp->tx_prod = 0;
bp->tx_cons = 0;
bp->hw_tx_cons = 0;
bp->tx_prod_bseq = 0;

cid = TX_CID;
4203 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4204 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4206 bnx2_init_tx_context(bp, cid);
4210 bnx2_init_rx_ring(struct bnx2 *bp)
4214 u16 prod, ring_prod;
4217 /* 8 for CRC and VLAN */
4218 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4220 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4222 ring_prod = prod = bp->rx_prod = 0;
4225 bp->rx_prod_bseq = 0;
4227 for (i = 0; i < bp->rx_max_ring; i++) {
4230 rxbd = &bp->rx_desc_ring[i][0];
4231 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4232 rxbd->rx_bd_len = bp->rx_buf_use_size;
        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
    }
    if (i == (bp->rx_max_ring - 1))
        j = 0;
    else
        j = i + 1;
4239 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
    rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
                           0xffffffff;
}
4244 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4245 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4247 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4249 val = (u64) bp->rx_desc_mapping[0] >> 32;
4250 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4252 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4253 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4255 for (i = 0; i < bp->rx_ring_size; i++) {
if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
    break;
}
4259 prod = NEXT_RX_BD(prod);
    ring_prod = RX_RING_IDX(prod);
}
bp->rx_prod = prod;
4264 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4266 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
    u32 num_rings, max;

    bp->rx_ring_size = size;

    num_rings = 1;
    while (size > MAX_RX_DESC_CNT) {
        size -= MAX_RX_DESC_CNT;
        num_rings++;
    }
    /* round to next power of 2 */
    max = MAX_RX_RINGS;
    while ((max & num_rings) == 0)
        max >>= 1;

    if (num_rings != max)
        max <<= 1;
4288 bp->rx_max_ring = max;
bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
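/* Unmap and free any skbs still held in the tx ring, e.g. when the
 * device is brought down with traffic in flight.
 */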
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
    int i;
if (bp->tx_buf_ring == NULL)
    return;
4300 for (i = 0; i < TX_DESC_CNT; ) {
4301 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
struct sk_buff *skb = tx_buf->skb;
int j, last;

if (skb == NULL) {
    i++;
    continue;
}
4310 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
    skb_headlen(skb), PCI_DMA_TODEVICE);

tx_buf->skb = NULL;
4315 last = skb_shinfo(skb)->nr_frags;
4316 for (j = 0; j < last; j++) {
4317 tx_buf = &bp->tx_buf_ring[i + j + 1];
4318 pci_unmap_page(bp->pdev,
4319 pci_unmap_addr(tx_buf, mapping),
        skb_shinfo(skb)->frags[j].size,
        PCI_DMA_TODEVICE);
}

dev_kfree_skb(skb);
i += j + 1;
}
}
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
    int i;
if (bp->rx_buf_ring == NULL)
    return;
4337 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4338 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
struct sk_buff *skb = rx_buf->skb;

if (skb == NULL)
    continue;
4344 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
    bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

rx_buf->skb = NULL;
dev_kfree_skb(skb);
}
}
4354 bnx2_free_skbs(struct bnx2 *bp)
4356 bnx2_free_tx_skbs(bp);
4357 bnx2_free_rx_skbs(bp);
4361 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
rc = bnx2_reset_chip(bp, reset_code);
if (rc)
    return rc;

if ((rc = bnx2_init_chip(bp)) != 0)
    return rc;
4373 bnx2_init_tx_ring(bp);
bnx2_init_rx_ring(bp);
return 0;
}
4379 bnx2_init_nic(struct bnx2 *bp)
if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
    return rc;

spin_lock_bh(&bp->phy_lock);
bnx2_init_phy(bp);
bnx2_set_link(bp);
spin_unlock_bh(&bp->phy_lock);
return 0;
}
4394 bnx2_test_registers(struct bnx2 *bp)
static const struct {
    u16   offset;
    u16   flags;
#define BNX2_FL_NOT_5709	1
    u32   rw_mask;
    u32   ro_mask;
} reg_tbl[] = {
4405 { 0x006c, 0, 0x00000000, 0x0000003f },
4406 { 0x0090, 0, 0xffffffff, 0x00000000 },
4407 { 0x0094, 0, 0x00000000, 0x00000000 },
4409 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4410 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4411 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4412 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4413 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4414 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4415 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4416 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4417 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4419 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4420 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4421 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4422 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4423 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4424 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4426 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4427 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4428 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4430 { 0x1000, 0, 0x00000000, 0x00000001 },
4431 { 0x1004, 0, 0x00000000, 0x000f0001 },
4433 { 0x1408, 0, 0x01c00800, 0x00000000 },
4434 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4435 { 0x14a8, 0, 0x00000000, 0x000001ff },
4436 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4437 { 0x14b0, 0, 0x00000002, 0x00000001 },
4438 { 0x14b8, 0, 0x00000000, 0x00000000 },
4439 { 0x14c0, 0, 0x00000000, 0x00000009 },
4440 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4441 { 0x14cc, 0, 0x00000000, 0x00000001 },
4442 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4444 { 0x1800, 0, 0x00000000, 0x00000001 },
4445 { 0x1804, 0, 0x00000000, 0x00000003 },
4447 { 0x2800, 0, 0x00000000, 0x00000001 },
4448 { 0x2804, 0, 0x00000000, 0x00003f01 },
4449 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4450 { 0x2810, 0, 0xffff0000, 0x00000000 },
4451 { 0x2814, 0, 0xffff0000, 0x00000000 },
4452 { 0x2818, 0, 0xffff0000, 0x00000000 },
4453 { 0x281c, 0, 0xffff0000, 0x00000000 },
4454 { 0x2834, 0, 0xffffffff, 0x00000000 },
4455 { 0x2840, 0, 0x00000000, 0xffffffff },
4456 { 0x2844, 0, 0x00000000, 0xffffffff },
4457 { 0x2848, 0, 0xffffffff, 0x00000000 },
4458 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4460 { 0x2c00, 0, 0x00000000, 0x00000011 },
4461 { 0x2c04, 0, 0x00000000, 0x00030007 },
4463 { 0x3c00, 0, 0x00000000, 0x00000001 },
4464 { 0x3c04, 0, 0x00000000, 0x00070000 },
4465 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4466 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4467 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4468 { 0x3c14, 0, 0x00000000, 0xffffffff },
4469 { 0x3c18, 0, 0x00000000, 0xffffffff },
4470 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4471 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4473 { 0x5004, 0, 0x00000000, 0x0000007f },
4474 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4476 { 0x5c00, 0, 0x00000000, 0x00000001 },
4477 { 0x5c04, 0, 0x00000000, 0x0003000f },
4478 { 0x5c08, 0, 0x00000003, 0x00000000 },
4479 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4480 { 0x5c10, 0, 0x00000000, 0xffffffff },
4481 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4482 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4483 { 0x5c88, 0, 0x00000000, 0x00077373 },
4484 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4486 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4487 { 0x680c, 0, 0xffffffff, 0x00000000 },
4488 { 0x6810, 0, 0xffffffff, 0x00000000 },
4489 { 0x6814, 0, 0xffffffff, 0x00000000 },
4490 { 0x6818, 0, 0xffffffff, 0x00000000 },
4491 { 0x681c, 0, 0xffffffff, 0x00000000 },
4492 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4493 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4494 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4495 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4496 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4497 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4498 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4499 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4500 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4501 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4502 { 0x684c, 0, 0xffffffff, 0x00000000 },
4503 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4504 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4505 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4506 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4507 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4508 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4510 { 0xffff, 0, 0x00000000, 0x00000000 },
if (CHIP_NUM(bp) == CHIP_NUM_5709)
    is_5709 = 1;
4518 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4519 u32 offset, rw_mask, ro_mask, save_val, val;
4520 u16 flags = reg_tbl[i].flags;
4522 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4525 offset = (u32) reg_tbl[i].offset;
4526 rw_mask = reg_tbl[i].rw_mask;
4527 ro_mask = reg_tbl[i].ro_mask;
4529 save_val = readl(bp->regview + offset);
4531 writel(0, bp->regview + offset);
4533 val = readl(bp->regview + offset);
4534 if ((val & rw_mask) != 0) {
4538 if ((val & ro_mask) != (save_val & ro_mask)) {
4542 writel(0xffffffff, bp->regview + offset);
4544 val = readl(bp->regview + offset);
4545 if ((val & rw_mask) != rw_mask) {
4549 if ((val & ro_mask) != (save_val & ro_mask)) {
4553 writel(save_val, bp->regview + offset);
4557 writel(save_val, bp->regview + offset);
4565 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4567 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4568 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4571 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4574 for (offset = 0; offset < size; offset += 4) {
4576 REG_WR_IND(bp, start + offset, test_pattern[i]);
            if (REG_RD_IND(bp, start + offset) !=
                test_pattern[i]) {
                return -ENODEV;
            }
        }
    }
    return 0;
}
4588 bnx2_test_memory(struct bnx2 *bp)
static struct mem_entry {
    u32   offset;
    u32   len;
} mem_tbl_5706[] = {
4596 { 0x60000, 0x4000 },
4597 { 0xa0000, 0x3000 },
4598 { 0xe0000, 0x4000 },
4599 { 0x120000, 0x4000 },
4600 { 0x1a0000, 0x4000 },
{ 0x160000, 0x4000 },
{ 0xffffffff, 0    },
},
mem_tbl_5709[] = {
4605 { 0x60000, 0x4000 },
4606 { 0xa0000, 0x3000 },
4607 { 0xe0000, 0x4000 },
4608 { 0x120000, 0x4000 },
{ 0x1a0000, 0x4000 },
{ 0xffffffff, 0    },
};
4612 struct mem_entry *mem_tbl;
4614 if (CHIP_NUM(bp) == CHIP_NUM_5709)
    mem_tbl = mem_tbl_5709;
else
    mem_tbl = mem_tbl_5706;
4619 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4620 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
        mem_tbl[i].len)) != 0) {
        break;
    }
}

return ret;
}
4629 #define BNX2_MAC_LOOPBACK 0
4630 #define BNX2_PHY_LOOPBACK 1
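/* Self-test helper: transmit one self-addressed frame in MAC or PHY
 * loopback mode and verify that it comes back intact on the rx ring.
 */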
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
4635 unsigned int pkt_size, num_pkts, i;
4636 struct sk_buff *skb, *rx_skb;
4637 unsigned char *packet;
4638 u16 rx_start_idx, rx_idx;
4641 struct sw_bd *rx_buf;
struct l2_fhdr *rx_hdr;
int ret = -ENODEV;
4645 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4646 bp->loopback = MAC_LOOPBACK;
4647 bnx2_set_mac_loopback(bp);
4649 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4650 bp->loopback = PHY_LOOPBACK;
    bnx2_set_phy_loopback(bp);
}
else
    return -EINVAL;

pkt_size = 1514;
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
    return -ENOMEM;
4660 packet = skb_put(skb, pkt_size);
4661 memcpy(packet, bp->dev->dev_addr, 6);
4662 memset(packet + 6, 0x0, 8);
4663 for (i = 14; i < pkt_size; i++)
4664 packet[i] = (unsigned char) (i & 0xff);
map = pci_map_single(bp->pdev, skb->data, pkt_size,
    PCI_DMA_TODEVICE);
4669 REG_WR(bp, BNX2_HC_COMMAND,
4670 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4672 REG_RD(bp, BNX2_HC_COMMAND);
rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

num_pkts = 0;
4679 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4681 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4682 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4683 txbd->tx_bd_mss_nbytes = pkt_size;
4684 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
num_pkts++;
bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4688 bp->tx_prod_bseq += pkt_size;
4690 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4691 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4695 REG_WR(bp, BNX2_HC_COMMAND,
4696 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4698 REG_RD(bp, BNX2_HC_COMMAND);
4702 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4705 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
    goto loopback_test_done;
}
4709 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4710 if (rx_idx != rx_start_idx + num_pkts) {
    goto loopback_test_done;
}
4714 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4715 rx_skb = rx_buf->skb;
4717 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4718 skb_reserve(rx_skb, bp->rx_offset);
4720 pci_dma_sync_single_for_cpu(bp->pdev,
4721 pci_unmap_addr(rx_buf, mapping),
4722 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4724 if (rx_hdr->l2_fhdr_status &
4725 (L2_FHDR_ERRORS_BAD_CRC |
4726 L2_FHDR_ERRORS_PHY_DECODE |
4727 L2_FHDR_ERRORS_ALIGNMENT |
4728 L2_FHDR_ERRORS_TOO_SHORT |
4729 L2_FHDR_ERRORS_GIANT_FRAME)) {
4731 goto loopback_test_done;
4734 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4735 goto loopback_test_done;
4738 for (i = 14; i < pkt_size; i++) {
4739 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
        goto loopback_test_done;
    }
}

ret = 0;

loopback_test_done:
bp->loopback = 0;
return ret;
}
4751 #define BNX2_MAC_LOOPBACK_FAILED 1
4752 #define BNX2_PHY_LOOPBACK_FAILED 2
4753 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4754 BNX2_PHY_LOOPBACK_FAILED)
4757 bnx2_test_loopback(struct bnx2 *bp)
4761 if (!netif_running(bp->dev))
4762 return BNX2_LOOPBACK_FAILED;
4764 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4765 spin_lock_bh(&bp->phy_lock);
bnx2_init_phy(bp);
spin_unlock_bh(&bp->phy_lock);
4768 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4769 rc |= BNX2_MAC_LOOPBACK_FAILED;
4770 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
    rc |= BNX2_PHY_LOOPBACK_FAILED;

return rc;
}
4775 #define NVRAM_SIZE 0x200
4776 #define CRC32_RESIDUAL 0xdebb20e3
4779 bnx2_test_nvram(struct bnx2 *bp)
4781 u32 buf[NVRAM_SIZE / 4];
4782 u8 *data = (u8 *) buf;
4786 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4787 goto test_nvram_done;
4789 magic = be32_to_cpu(buf[0]);
4790 if (magic != 0x669955aa) {
    rc = -ENODEV;
    goto test_nvram_done;
}
4795 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4796 goto test_nvram_done;
4798 csum = ether_crc_le(0x100, data);
4799 if (csum != CRC32_RESIDUAL) {
    rc = -ENODEV;
    goto test_nvram_done;
}
4804 csum = ether_crc_le(0x100, data + 0x100);
if (csum != CRC32_RESIDUAL) {
    rc = -ENODEV;
}

test_nvram_done:
return rc;
}
static int
bnx2_test_link(struct bnx2 *bp)
{
    u32 bmsr;
4818 spin_lock_bh(&bp->phy_lock);
4819 bnx2_enable_bmsr1(bp);
4820 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4821 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4822 bnx2_disable_bmsr1(bp);
4823 spin_unlock_bh(&bp->phy_lock);
if (bmsr & BMSR_LSTATUS) {
    return 0;
}
return -ENODEV;
}
4832 bnx2_test_intr(struct bnx2 *bp)
if (!netif_running(bp->dev))
    return -ENODEV;
4840 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4842 /* This register is not touched during run-time. */
4843 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4844 REG_RD(bp, BNX2_HC_COMMAND);
4846 for (i = 0; i < 10; i++) {
    if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
        status_idx) {

        break;
    }

    msleep_interruptible(10);
}
if (i < 10)
    return 0;

return -ENODEV;
}
4862 bnx2_5706_serdes_timer(struct bnx2 *bp)
4864 spin_lock(&bp->phy_lock);
4865 if (bp->serdes_an_pending)
4866 bp->serdes_an_pending--;
4867 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4870 bp->current_interval = bp->timer_interval;
4872 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4874 if (bmcr & BMCR_ANENABLE) {
4877 bnx2_write_phy(bp, 0x1c, 0x7c00);
4878 bnx2_read_phy(bp, 0x1c, &phy1);
4880 bnx2_write_phy(bp, 0x17, 0x0f01);
4881 bnx2_read_phy(bp, 0x15, &phy2);
4882 bnx2_write_phy(bp, 0x17, 0x0f01);
4883 bnx2_read_phy(bp, 0x15, &phy2);
4885 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4886 !(phy2 & 0x20)) { /* no CONFIG */
4888 bmcr &= ~BMCR_ANENABLE;
4889 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4890 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4891 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4895 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4896 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4899 bnx2_write_phy(bp, 0x17, 0x0f01);
4900 bnx2_read_phy(bp, 0x15, &phy2);
4904 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4905 bmcr |= BMCR_ANENABLE;
4906 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4908 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4911 bp->current_interval = bp->timer_interval;
4913 spin_unlock(&bp->phy_lock);
4917 bnx2_5708_serdes_timer(struct bnx2 *bp)
if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
    return;
4922 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
    bp->serdes_an_pending = 0;
    return;
}
4927 spin_lock(&bp->phy_lock);
4928 if (bp->serdes_an_pending)
4929 bp->serdes_an_pending--;
4930 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4933 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4934 if (bmcr & BMCR_ANENABLE) {
4935 bnx2_enable_forced_2g5(bp);
4936 bp->current_interval = SERDES_FORCED_TIMEOUT;
4938 bnx2_disable_forced_2g5(bp);
4939 bp->serdes_an_pending = 2;
4940 bp->current_interval = bp->timer_interval;
4944 bp->current_interval = bp->timer_interval;
spin_unlock(&bp->phy_lock);
}
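/* Periodic timer: sends the heartbeat to the management firmware,
 * refreshes the firmware rx-drop counter, and drives the SerDes
 * parallel-detect workarounds.
 */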
static void
bnx2_timer(unsigned long data)
{
4952 struct bnx2 *bp = (struct bnx2 *) data;
if (!netif_running(bp->dev))
    return;
4957 if (atomic_read(&bp->intr_sem) != 0)
4958 goto bnx2_restart_timer;
4960 bnx2_send_heart_beat(bp);
4962 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4964 /* workaround occasional corrupted counters */
4965 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4966 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4967 BNX2_HC_COMMAND_STATS_NOW);
4969 if (bp->phy_flags & PHY_SERDES_FLAG) {
4970 if (CHIP_NUM(bp) == CHIP_NUM_5706)
        bnx2_5706_serdes_timer(bp);
    else
        bnx2_5708_serdes_timer(bp);
}

bnx2_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4981 bnx2_request_irq(struct bnx2 *bp)
4983 struct net_device *dev = bp->dev;
4986 if (bp->flags & USING_MSI_FLAG) {
4987 irq_handler_t fn = bnx2_msi;
4989 if (bp->flags & ONE_SHOT_MSI_FLAG)
4990 fn = bnx2_msi_1shot;
4992 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
} else
    rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                     IRQF_SHARED, dev->name, dev);
return rc;
}
5000 bnx2_free_irq(struct bnx2 *bp)
5002 struct net_device *dev = bp->dev;
5004 if (bp->flags & USING_MSI_FLAG) {
5005 free_irq(bp->pdev->irq, dev);
5006 pci_disable_msi(bp->pdev);
5007 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
} else
    free_irq(bp->pdev->irq, dev);
}
5012 /* Called with rtnl_lock */
5014 bnx2_open(struct net_device *dev)
5016 struct bnx2 *bp = netdev_priv(dev);
5019 netif_carrier_off(dev);
5021 bnx2_set_power_state(bp, PCI_D0);
5022 bnx2_disable_int(bp);
rc = bnx2_alloc_mem(bp);
if (rc)
    return rc;
5028 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5029 if (pci_enable_msi(bp->pdev) == 0) {
5030 bp->flags |= USING_MSI_FLAG;
5031 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5032 bp->flags |= ONE_SHOT_MSI_FLAG;
    }
}
rc = bnx2_request_irq(bp);

if (rc) {
    bnx2_free_mem(bp);
    return rc;
}

rc = bnx2_init_nic(bp);
if (rc) {
    bnx2_free_irq(bp);
    bnx2_free_skbs(bp);
    bnx2_free_mem(bp);
    return rc;
}

mod_timer(&bp->timer, jiffies + bp->current_interval);
5053 atomic_set(&bp->intr_sem, 0);
5055 bnx2_enable_int(bp);
5057 if (bp->flags & USING_MSI_FLAG) {
5058 /* Test MSI to make sure it is working
5059 * If MSI test fails, go back to INTx mode
5061 if (bnx2_test_intr(bp) != 0) {
5062 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5063 " using MSI, switching to INTx mode. Please"
5064 " report this failure to the PCI maintainer"
5065 " and include system chipset information.\n",
5068 bnx2_disable_int(bp);
5071 rc = bnx2_init_nic(bp);
5074 rc = bnx2_request_irq(bp);
5079 del_timer_sync(&bp->timer);
5082 bnx2_enable_int(bp);
5085 if (bp->flags & USING_MSI_FLAG) {
5086 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5089 netif_start_queue(dev);
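/* Work item scheduled by bnx2_tx_timeout: quiesce the interface,
 * reinitialize the NIC, and restart it without blocking the caller.
 * bp->in_reset_task lets bnx2_close wait for this to finish.
 */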
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
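/* Transmit fast path: map the skb head (and any page fragments) and
 * post one buffer descriptor per piece to the Tx ring, with the
 * checksum, LSO and VLAN flags folded into the first descriptor.
 */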
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);
		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
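/* ndo stop: wait out any in-flight reset task (flush_scheduled_work()
 * could deadlock here, see the comment below), quiesce the device,
 * pick a reset code reflecting the WoL setting, and power down.
 */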
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
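/* Fill struct net_device_stats from the DMA'd hardware statistics
 * block.  64-bit counters live as hi/lo register pairs; GET_NET_STATS
 * collapses them to an unsigned long on 64-bit hosts and to the low
 * word on 32-bit hosts.
 */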
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
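/* Validate and apply a new link configuration.  Single-speed
 * advertisement is allowed under autoneg; forced mode on fibre accepts
 * only 1G/2.5G full duplex.  Errors unwind through err_out_unlock.
 */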
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	} else {
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;
		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}
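/* Restart autonegotiation.  On SerDes links the PHY is first forced
 * into loopback so the peer sees the link drop, then autoneg is
 * restarted after a short delay.
 */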
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg)
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	else
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
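/* Map each ethtool string above to the 32-bit word offset of its
 * counter in struct statistics_block; the _hi names refer to the upper
 * halves of 64-bit counters.
 */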
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
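/* Run the ethtool self tests.  Offline tests reset the chip into
 * diagnostic mode and check registers, memory and loopback; the NVRAM,
 * interrupt and link tests also run online.  A nonzero buf[] entry
 * marks the corresponding test above as failed.
 */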
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		} else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
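/* Blink the port LED for ethtool identification, alternating between
 * all-off and all-on override states, then restore the original LED
 * configuration.
 */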
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		} else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_ipv6_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
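/* Derive the bus type and clock speed from the PCI config status
 * registers; only meaningful on non-PCIE chips, and reported later by
 * bnx2_bus_string().
 */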
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
}
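/* One-time board setup at probe: map the register window, size the DMA
 * masks, read the firmware version and permanent MAC address from
 * shared memory, and establish default coalescing, PHY and WoL state.
 */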
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);
	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= PCIE_FLAG;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= MSI_CAP_FLAG;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & PCIE_FLAG))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}
	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address. First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3; i++) {
			reg = REG_RD_IND(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;
	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->phy_port = PORT_FIBRE;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
		bnx2_init_remote_phy(bp);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word. This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & PCIE_FLAG) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & PCIX_FLAG)
			s += sprintf(s, "-X");
		if (bp->flags & PCI_32BIT_FLAG)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
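/* PCI probe entry point: allocate the net_device, initialize the
 * board, wire up the netdev operations and offload features, and
 * register the interface.
 */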
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);