1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
43 #include <net/checksum.h>
44 #include <linux/workqueue.h>
45 #include <linux/crc32.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/zlib.h>
54 #define DRV_MODULE_NAME "bnx2"
55 #define PFX DRV_MODULE_NAME ": "
56 #define DRV_MODULE_VERSION "1.6.2"
57 #define DRV_MODULE_RELDATE "July 6, 2007"
59 #define RUN_AT(x) (jiffies + (x))
61 /* Time in jiffies before concluding the transmitter is hung. */
62 #define TX_TIMEOUT (5*HZ)
64 static const char version[] __devinitdata =
65 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
68 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
69 MODULE_LICENSE("GPL");
70 MODULE_VERSION(DRV_MODULE_VERSION);
72 static int disable_msi = 0;
74 module_param(disable_msi, int, 0);
75 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
89 /* indexed by board_t, above */
92 } board_info[] __devinitdata = {
93 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
94 { "HP NC370T Multifunction Gigabit Server Adapter" },
95 { "HP NC370i Multifunction Gigabit Server Adapter" },
96 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
97 { "HP NC370F Multifunction Gigabit Server Adapter" },
98 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
100 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
101 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
104 static struct pci_device_id bnx2_pci_tbl[] = {
105 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
106 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
107 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
108 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
109 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
111 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
114 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
126 static struct flash_spec flash_table[] =
129 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
130 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
131 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 /* Expansion entry 0001 */
134 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
135 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
136 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 /* Saifun SA25F010 (non-buffered flash) */
139 /* strap, cfg1, & write1 need updates */
140 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
141 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
142 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
143 "Non-buffered flash (128kB)"},
144 /* Saifun SA25F020 (non-buffered flash) */
145 /* strap, cfg1, & write1 need updates */
146 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
149 "Non-buffered flash (256kB)"},
150 /* Expansion entry 0100 */
151 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
152 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
156 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
159 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
160 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
161 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
162 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
164 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
165 /* Saifun SA25F005 (non-buffered flash) */
166 /* strap, cfg1, & write1 need updates */
167 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
168 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
169 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
170 "Non-buffered flash (64kB)"},
172 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
173 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
174 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 /* Expansion entry 1001 */
177 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 /* Expansion entry 1010 */
182 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
183 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* ATMEL AT45DB011B (buffered flash) */
187 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
188 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
189 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
190 "Buffered flash (128kB)"},
191 /* Expansion entry 1100 */
192 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 /* Expansion entry 1101 */
197 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
198 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Atmel Expansion entry 1110 */
202 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1110 (Atmel)"},
206 /* ATMEL AT45DB021B (buffered flash) */
207 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
208 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
210 "Buffered flash (256kB)"},
213 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
221 /* The ring uses 256 indices for 255 entries, one of them
222 * needs to be skipped.
224 diff = bp->tx_prod - bp->tx_cons;
225 if (unlikely(diff >= TX_DESC_CNT)) {
227 if (diff == TX_DESC_CNT)
228 diff = MAX_TX_DESC_CNT;
230 return (bp->tx_ring_size - diff);
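/* Illustrative numbers, assuming the default 255-entry ring: with
 * prod == cons, diff is 0 and all 255 slots are free; with the ring
 * completely full, the 16-bit prod/cons difference can momentarily
 * read as TX_DESC_CNT (256), which the clamp above folds back to
 * MAX_TX_DESC_CNT (255) because one of the 256 hardware indices is
 * never used for data.
 */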
234 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
238 spin_lock_bh(&bp->indirect_lock);
239 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
240 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
241 spin_unlock_bh(&bp->indirect_lock);
246 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
248 spin_lock_bh(&bp->indirect_lock);
249 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
251 spin_unlock_bh(&bp->indirect_lock);
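/* The two helpers above are the usual PCI indirect-window pattern:
 * latch the target offset into REG_WINDOW_ADDRESS, then move the data
 * through REG_WINDOW, with indirect_lock keeping the two-step sequence
 * atomic against other users of the shared window.
 */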
255 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
258 spin_lock_bh(&bp->indirect_lock);
259 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
262 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
263 REG_WR(bp, BNX2_CTX_CTX_CTRL,
264 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
265 for (i = 0; i < 5; i++) {
267 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
268 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
273 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
274 REG_WR(bp, BNX2_CTX_DATA, val);
276 spin_unlock_bh(&bp->indirect_lock);
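/* On the 5709, context memory is written through a CTX_DATA/CTX_CTRL
 * command pair and the WRITE_REQ bit is polled up to five times until
 * the hardware clears it; older chips take a plain address/data write
 * via CTX_DATA_ADR/CTX_DATA instead.
 */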
280 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
285 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
286 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
287 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
289 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
290 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
295 val1 = (bp->phy_addr << 21) | (reg << 16) |
296 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
297 BNX2_EMAC_MDIO_COMM_START_BUSY;
298 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
300 for (i = 0; i < 50; i++) {
303 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
304 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
307 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
308 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
314 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
323 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
324 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
325 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
327 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
328 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
337 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
342 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
343 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
344 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
346 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
347 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
352 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
353 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
354 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
355 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
357 for (i = 0; i < 50; i++) {
360 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
361 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
367 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
372 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
373 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
374 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
377 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
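/* MDIO protocol used by bnx2_read_phy()/bnx2_write_phy() above:
 * firmware auto-polling is paused so the driver owns the bus, a
 * command word (PHY address in bits 25:21, register in bits 20:16)
 * is written to EMAC_MDIO_COMM with START_BUSY set, START_BUSY is
 * polled up to 50 times until the PHY completes, and auto-polling
 * is then restored.
 */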
386 bnx2_disable_int(struct bnx2 *bp)
388 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
389 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
390 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
394 bnx2_enable_int(struct bnx2 *bp)
396 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
397 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
398 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
400 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
401 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
403 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
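/* The trailing COAL_NOW write forces an immediate coalescing pass so
 * that any events which arrived while interrupts were masked raise an
 * interrupt right away instead of waiting for the next coalesce timer.
 */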
407 bnx2_disable_int_sync(struct bnx2 *bp)
409 atomic_inc(&bp->intr_sem);
410 bnx2_disable_int(bp);
411 synchronize_irq(bp->pdev->irq);
415 bnx2_netif_stop(struct bnx2 *bp)
417 bnx2_disable_int_sync(bp);
418 if (netif_running(bp->dev)) {
419 netif_poll_disable(bp->dev);
420 netif_tx_disable(bp->dev);
421 bp->dev->trans_start = jiffies; /* prevent tx timeout */
426 bnx2_netif_start(struct bnx2 *bp)
428 if (atomic_dec_and_test(&bp->intr_sem)) {
429 if (netif_running(bp->dev)) {
430 netif_wake_queue(bp->dev);
431 netif_poll_enable(bp->dev);
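/* intr_sem is effectively a disable count: bnx2_disable_int_sync()
 * increments it before masking the IRQ and the ISRs return early while
 * it is non-zero, so the queue and poll are only re-enabled once
 * atomic_dec_and_test() brings the count back to zero.
 */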
438 bnx2_free_mem(struct bnx2 *bp)
442 for (i = 0; i < bp->ctx_pages; i++) {
443 if (bp->ctx_blk[i]) {
444 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446 bp->ctx_blk_mapping[i]);
447 bp->ctx_blk[i] = NULL;
450 if (bp->status_blk) {
451 pci_free_consistent(bp->pdev, bp->status_stats_size,
452 bp->status_blk, bp->status_blk_mapping);
453 bp->status_blk = NULL;
454 bp->stats_blk = NULL;
456 if (bp->tx_desc_ring) {
457 pci_free_consistent(bp->pdev,
458 sizeof(struct tx_bd) * TX_DESC_CNT,
459 bp->tx_desc_ring, bp->tx_desc_mapping);
460 bp->tx_desc_ring = NULL;
462 kfree(bp->tx_buf_ring);
463 bp->tx_buf_ring = NULL;
464 for (i = 0; i < bp->rx_max_ring; i++) {
465 if (bp->rx_desc_ring[i])
466 pci_free_consistent(bp->pdev,
467 sizeof(struct rx_bd) * RX_DESC_CNT,
469 bp->rx_desc_mapping[i]);
470 bp->rx_desc_ring[i] = NULL;
472 vfree(bp->rx_buf_ring);
473 bp->rx_buf_ring = NULL;
477 bnx2_alloc_mem(struct bnx2 *bp)
479 int i, status_blk_size;
481 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
483 if (bp->tx_buf_ring == NULL)
486 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
487 sizeof(struct tx_bd) *
489 &bp->tx_desc_mapping);
490 if (bp->tx_desc_ring == NULL)
493 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
495 if (bp->rx_buf_ring == NULL)
498 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
501 for (i = 0; i < bp->rx_max_ring; i++) {
502 bp->rx_desc_ring[i] =
503 pci_alloc_consistent(bp->pdev,
504 sizeof(struct rx_bd) * RX_DESC_CNT,
505 &bp->rx_desc_mapping[i]);
506 if (bp->rx_desc_ring[i] == NULL)
511 /* Combine status and statistics blocks into one allocation. */
512 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
513 bp->status_stats_size = status_blk_size +
514 sizeof(struct statistics_block);
516 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
517 &bp->status_blk_mapping);
518 if (bp->status_blk == NULL)
521 memset(bp->status_blk, 0, bp->status_stats_size);
523 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
526 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
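/* Resulting layout of the combined allocation: a single DMA-coherent
 * buffer holding the status block, padded to a cache line by
 * L1_CACHE_ALIGN(), immediately followed by the statistics block; the
 * CPU pointer and bus address of the stats block are both derived
 * with the same status_blk_size offset.
 */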
528 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
529 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
530 if (bp->ctx_pages == 0)
532 for (i = 0; i < bp->ctx_pages; i++) {
533 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
535 &bp->ctx_blk_mapping[i]);
536 if (bp->ctx_blk[i] == NULL)
548 bnx2_report_fw_link(struct bnx2 *bp)
550 u32 fw_link_status = 0;
552 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
558 switch (bp->line_speed) {
560 if (bp->duplex == DUPLEX_HALF)
561 fw_link_status = BNX2_LINK_STATUS_10HALF;
563 fw_link_status = BNX2_LINK_STATUS_10FULL;
566 if (bp->duplex == DUPLEX_HALF)
567 fw_link_status = BNX2_LINK_STATUS_100HALF;
569 fw_link_status = BNX2_LINK_STATUS_100FULL;
572 if (bp->duplex == DUPLEX_HALF)
573 fw_link_status = BNX2_LINK_STATUS_1000HALF;
575 fw_link_status = BNX2_LINK_STATUS_1000FULL;
578 if (bp->duplex == DUPLEX_HALF)
579 fw_link_status = BNX2_LINK_STATUS_2500HALF;
581 fw_link_status = BNX2_LINK_STATUS_2500FULL;
585 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
588 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
590 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
591 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
593 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
594 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
595 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
597 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
601 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
603 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
607 bnx2_xceiver_str(struct bnx2 *bp)
609 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
610 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
615 bnx2_report_link(struct bnx2 *bp)
618 netif_carrier_on(bp->dev);
619 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
620 bnx2_xceiver_str(bp));
622 printk("%d Mbps ", bp->line_speed);
624 if (bp->duplex == DUPLEX_FULL)
625 printk("full duplex");
627 printk("half duplex");
630 if (bp->flow_ctrl & FLOW_CTRL_RX) {
631 printk(", receive ");
632 if (bp->flow_ctrl & FLOW_CTRL_TX)
633 printk("& transmit ");
636 printk(", transmit ");
638 printk("flow control ON");
643 netif_carrier_off(bp->dev);
644 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
645 bnx2_xceiver_str(bp));
648 bnx2_report_fw_link(bp);
652 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
654 u32 local_adv, remote_adv;
657 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
658 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
660 if (bp->duplex == DUPLEX_FULL) {
661 bp->flow_ctrl = bp->req_flow_ctrl;
666 if (bp->duplex != DUPLEX_FULL) {
670 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
671 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
674 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
675 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
676 bp->flow_ctrl |= FLOW_CTRL_TX;
677 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
678 bp->flow_ctrl |= FLOW_CTRL_RX;
682 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
683 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
685 if (bp->phy_flags & PHY_SERDES_FLAG) {
686 u32 new_local_adv = 0;
687 u32 new_remote_adv = 0;
689 if (local_adv & ADVERTISE_1000XPAUSE)
690 new_local_adv |= ADVERTISE_PAUSE_CAP;
691 if (local_adv & ADVERTISE_1000XPSE_ASYM)
692 new_local_adv |= ADVERTISE_PAUSE_ASYM;
693 if (remote_adv & ADVERTISE_1000XPAUSE)
694 new_remote_adv |= ADVERTISE_PAUSE_CAP;
695 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
696 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
698 local_adv = new_local_adv;
699 remote_adv = new_remote_adv;
702 /* See Table 28B-3 of 802.3ab-1999 spec. */
703 if (local_adv & ADVERTISE_PAUSE_CAP) {
704 if(local_adv & ADVERTISE_PAUSE_ASYM) {
705 if (remote_adv & ADVERTISE_PAUSE_CAP) {
706 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
708 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
709 bp->flow_ctrl = FLOW_CTRL_RX;
713 if (remote_adv & ADVERTISE_PAUSE_CAP) {
714 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
718 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
719 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
720 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
722 bp->flow_ctrl = FLOW_CTRL_TX;
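/* The chain above is the pause resolution matrix of IEEE 802.3 Annex
 * 28B: symmetric PAUSE on both sides enables both directions; local
 * CAP+ASYM against a partner advertising only ASYM resolves to
 * RX-only pause; local ASYM-only against a partner advertising both
 * CAP and ASYM resolves to TX-only pause.
 */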
728 bnx2_5709s_linkup(struct bnx2 *bp)
734 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
735 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
736 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
738 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
739 bp->line_speed = bp->req_line_speed;
740 bp->duplex = bp->req_duplex;
743 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
745 case MII_BNX2_GP_TOP_AN_SPEED_10:
746 bp->line_speed = SPEED_10;
748 case MII_BNX2_GP_TOP_AN_SPEED_100:
749 bp->line_speed = SPEED_100;
751 case MII_BNX2_GP_TOP_AN_SPEED_1G:
752 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
753 bp->line_speed = SPEED_1000;
755 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
756 bp->line_speed = SPEED_2500;
759 if (val & MII_BNX2_GP_TOP_AN_FD)
760 bp->duplex = DUPLEX_FULL;
762 bp->duplex = DUPLEX_HALF;
767 bnx2_5708s_linkup(struct bnx2 *bp)
772 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
773 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
774 case BCM5708S_1000X_STAT1_SPEED_10:
775 bp->line_speed = SPEED_10;
777 case BCM5708S_1000X_STAT1_SPEED_100:
778 bp->line_speed = SPEED_100;
780 case BCM5708S_1000X_STAT1_SPEED_1G:
781 bp->line_speed = SPEED_1000;
783 case BCM5708S_1000X_STAT1_SPEED_2G5:
784 bp->line_speed = SPEED_2500;
787 if (val & BCM5708S_1000X_STAT1_FD)
788 bp->duplex = DUPLEX_FULL;
790 bp->duplex = DUPLEX_HALF;
796 bnx2_5706s_linkup(struct bnx2 *bp)
798 u32 bmcr, local_adv, remote_adv, common;
801 bp->line_speed = SPEED_1000;
803 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
804 if (bmcr & BMCR_FULLDPLX) {
805 bp->duplex = DUPLEX_FULL;
808 bp->duplex = DUPLEX_HALF;
811 if (!(bmcr & BMCR_ANENABLE)) {
815 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
816 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
818 common = local_adv & remote_adv;
819 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
821 if (common & ADVERTISE_1000XFULL) {
822 bp->duplex = DUPLEX_FULL;
825 bp->duplex = DUPLEX_HALF;
833 bnx2_copper_linkup(struct bnx2 *bp)
837 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
838 if (bmcr & BMCR_ANENABLE) {
839 u32 local_adv, remote_adv, common;
841 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
842 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
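/* MII_STAT1000 reports the partner's 1000BASE-T abilities two bit
 * positions above where MII_CTRL1000 carries our own advertisement
 * (LPA_1000FULL is 0x0800 versus ADVERTISE_1000FULL at 0x0200), so
 * shifting the link-partner word right by 2 lets the ADVERTISE_1000*
 * masks test both sides.
 */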
844 common = local_adv & (remote_adv >> 2);
845 if (common & ADVERTISE_1000FULL) {
846 bp->line_speed = SPEED_1000;
847 bp->duplex = DUPLEX_FULL;
849 else if (common & ADVERTISE_1000HALF) {
850 bp->line_speed = SPEED_1000;
851 bp->duplex = DUPLEX_HALF;
854 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
855 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
857 common = local_adv & remote_adv;
858 if (common & ADVERTISE_100FULL) {
859 bp->line_speed = SPEED_100;
860 bp->duplex = DUPLEX_FULL;
862 else if (common & ADVERTISE_100HALF) {
863 bp->line_speed = SPEED_100;
864 bp->duplex = DUPLEX_HALF;
866 else if (common & ADVERTISE_10FULL) {
867 bp->line_speed = SPEED_10;
868 bp->duplex = DUPLEX_FULL;
870 else if (common & ADVERTISE_10HALF) {
871 bp->line_speed = SPEED_10;
872 bp->duplex = DUPLEX_HALF;
881 if (bmcr & BMCR_SPEED100) {
882 bp->line_speed = SPEED_100;
885 bp->line_speed = SPEED_10;
887 if (bmcr & BMCR_FULLDPLX) {
888 bp->duplex = DUPLEX_FULL;
891 bp->duplex = DUPLEX_HALF;
899 bnx2_set_mac_link(struct bnx2 *bp)
903 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
904 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
905 (bp->duplex == DUPLEX_HALF)) {
906 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
909 /* Configure the EMAC mode register. */
910 val = REG_RD(bp, BNX2_EMAC_MODE);
912 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
913 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
914 BNX2_EMAC_MODE_25G_MODE);
917 switch (bp->line_speed) {
919 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
920 val |= BNX2_EMAC_MODE_PORT_MII_10M;
925 val |= BNX2_EMAC_MODE_PORT_MII;
928 val |= BNX2_EMAC_MODE_25G_MODE;
931 val |= BNX2_EMAC_MODE_PORT_GMII;
936 val |= BNX2_EMAC_MODE_PORT_GMII;
939 /* Set the MAC to operate in the appropriate duplex mode. */
940 if (bp->duplex == DUPLEX_HALF)
941 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
942 REG_WR(bp, BNX2_EMAC_MODE, val);
944 /* Enable/disable rx PAUSE. */
945 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
947 if (bp->flow_ctrl & FLOW_CTRL_RX)
948 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
949 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
951 /* Enable/disable tx PAUSE. */
952 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
953 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
955 if (bp->flow_ctrl & FLOW_CTRL_TX)
956 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
957 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
959 /* Acknowledge the interrupt. */
960 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
966 bnx2_enable_bmsr1(struct bnx2 *bp)
968 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
969 (CHIP_NUM(bp) == CHIP_NUM_5709))
970 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
971 MII_BNX2_BLK_ADDR_GP_STATUS);
975 bnx2_disable_bmsr1(struct bnx2 *bp)
977 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
978 (CHIP_NUM(bp) == CHIP_NUM_5709))
979 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
980 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
984 bnx2_test_and_enable_2g5(struct bnx2 *bp)
989 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
992 if (bp->autoneg & AUTONEG_SPEED)
993 bp->advertising |= ADVERTISED_2500baseX_Full;
995 if (CHIP_NUM(bp) == CHIP_NUM_5709)
996 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
998 bnx2_read_phy(bp, bp->mii_up1, &up1);
999 if (!(up1 & BCM5708S_UP1_2G5)) {
1000 up1 |= BCM5708S_UP1_2G5;
1001 bnx2_write_phy(bp, bp->mii_up1, up1);
1005 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1006 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1007 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1013 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1018 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1021 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1022 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1024 bnx2_read_phy(bp, bp->mii_up1, &up1);
1025 if (up1 & BCM5708S_UP1_2G5) {
1026 up1 &= ~BCM5708S_UP1_2G5;
1027 bnx2_write_phy(bp, bp->mii_up1, up1);
1031 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1032 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1033 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1039 bnx2_enable_forced_2g5(struct bnx2 *bp)
1043 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1046 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1049 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1050 MII_BNX2_BLK_ADDR_SERDES_DIG);
1051 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1052 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1053 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1054 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1056 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1057 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1058 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1060 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1061 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1062 bmcr |= BCM5708S_BMCR_FORCE_2500;
1065 if (bp->autoneg & AUTONEG_SPEED) {
1066 bmcr &= ~BMCR_ANENABLE;
1067 if (bp->req_duplex == DUPLEX_FULL)
1068 bmcr |= BMCR_FULLDPLX;
1070 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1074 bnx2_disable_forced_2g5(struct bnx2 *bp)
1078 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1081 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1084 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1085 MII_BNX2_BLK_ADDR_SERDES_DIG);
1086 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1087 val &= ~MII_BNX2_SD_MISC1_FORCE;
1088 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1091 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1092 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1095 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1096 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1099 if (bp->autoneg & AUTONEG_SPEED)
1100 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1101 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1105 bnx2_set_link(struct bnx2 *bp)
1110 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1115 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1118 link_up = bp->link_up;
1120 bnx2_enable_bmsr1(bp);
1121 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1122 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1123 bnx2_disable_bmsr1(bp);
1125 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1126 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1129 val = REG_RD(bp, BNX2_EMAC_STATUS);
1130 if (val & BNX2_EMAC_STATUS_LINK)
1131 bmsr |= BMSR_LSTATUS;
1133 bmsr &= ~BMSR_LSTATUS;
1136 if (bmsr & BMSR_LSTATUS) {
1139 if (bp->phy_flags & PHY_SERDES_FLAG) {
1140 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1141 bnx2_5706s_linkup(bp);
1142 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1143 bnx2_5708s_linkup(bp);
1144 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1145 bnx2_5709s_linkup(bp);
1148 bnx2_copper_linkup(bp);
1150 bnx2_resolve_flow_ctrl(bp);
1153 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1154 (bp->autoneg & AUTONEG_SPEED))
1155 bnx2_disable_forced_2g5(bp);
1157 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1161 if (bp->link_up != link_up) {
1162 bnx2_report_link(bp);
1165 bnx2_set_mac_link(bp);
1171 bnx2_reset_phy(struct bnx2 *bp)
1176 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1178 #define PHY_RESET_MAX_WAIT 100
1179 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1182 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1183 if (!(reg & BMCR_RESET)) {
1188 if (i == PHY_RESET_MAX_WAIT) {
1195 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1199 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1200 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1202 if (bp->phy_flags & PHY_SERDES_FLAG) {
1203 adv = ADVERTISE_1000XPAUSE;
1206 adv = ADVERTISE_PAUSE_CAP;
1209 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1210 if (bp->phy_flags & PHY_SERDES_FLAG) {
1211 adv = ADVERTISE_1000XPSE_ASYM;
1214 adv = ADVERTISE_PAUSE_ASYM;
1217 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1218 if (bp->phy_flags & PHY_SERDES_FLAG) {
1219 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1222 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1228 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1231 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1233 u32 speed_arg = 0, pause_adv;
1235 pause_adv = bnx2_phy_get_pause_adv(bp);
1237 if (bp->autoneg & AUTONEG_SPEED) {
1238 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1239 if (bp->advertising & ADVERTISED_10baseT_Half)
1240 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1241 if (bp->advertising & ADVERTISED_10baseT_Full)
1242 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1243 if (bp->advertising & ADVERTISED_100baseT_Half)
1244 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1245 if (bp->advertising & ADVERTISED_100baseT_Full)
1246 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1247 if (bp->advertising & ADVERTISED_1000baseT_Full)
1248 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1249 if (bp->advertising & ADVERTISED_2500baseX_Full)
1250 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1252 if (bp->req_line_speed == SPEED_2500)
1253 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1254 else if (bp->req_line_speed == SPEED_1000)
1255 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1256 else if (bp->req_line_speed == SPEED_100) {
1257 if (bp->req_duplex == DUPLEX_FULL)
1258 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1260 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1261 } else if (bp->req_line_speed == SPEED_10) {
1262 if (bp->req_duplex == DUPLEX_FULL)
1263 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1265 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1269 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1270 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1271 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1272 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1274 if (port == PORT_TP)
1275 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1276 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1278 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1280 spin_unlock_bh(&bp->phy_lock);
1281 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1282 spin_lock_bh(&bp->phy_lock);
1288 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1293 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1294 return (bnx2_setup_remote_phy(bp, port));
1296 if (!(bp->autoneg & AUTONEG_SPEED)) {
1298 int force_link_down = 0;
1300 if (bp->req_line_speed == SPEED_2500) {
1301 if (!bnx2_test_and_enable_2g5(bp))
1302 force_link_down = 1;
1303 } else if (bp->req_line_speed == SPEED_1000) {
1304 if (bnx2_test_and_disable_2g5(bp))
1305 force_link_down = 1;
1307 bnx2_read_phy(bp, bp->mii_adv, &adv);
1308 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1310 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1311 new_bmcr = bmcr & ~BMCR_ANENABLE;
1312 new_bmcr |= BMCR_SPEED1000;
1314 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1315 if (bp->req_line_speed == SPEED_2500)
1316 bnx2_enable_forced_2g5(bp);
1317 else if (bp->req_line_speed == SPEED_1000) {
1318 bnx2_disable_forced_2g5(bp);
1319 new_bmcr &= ~0x2000;
1322 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1323 if (bp->req_line_speed == SPEED_2500)
1324 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1326 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1329 if (bp->req_duplex == DUPLEX_FULL) {
1330 adv |= ADVERTISE_1000XFULL;
1331 new_bmcr |= BMCR_FULLDPLX;
1334 adv |= ADVERTISE_1000XHALF;
1335 new_bmcr &= ~BMCR_FULLDPLX;
1337 if ((new_bmcr != bmcr) || (force_link_down)) {
1338 /* Force a link down visible on the other side */
1340 bnx2_write_phy(bp, bp->mii_adv, adv &
1341 ~(ADVERTISE_1000XFULL |
1342 ADVERTISE_1000XHALF));
1343 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1344 BMCR_ANRESTART | BMCR_ANENABLE);
1347 netif_carrier_off(bp->dev);
1348 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1349 bnx2_report_link(bp);
1351 bnx2_write_phy(bp, bp->mii_adv, adv);
1352 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1354 bnx2_resolve_flow_ctrl(bp);
1355 bnx2_set_mac_link(bp);
1360 bnx2_test_and_enable_2g5(bp);
1362 if (bp->advertising & ADVERTISED_1000baseT_Full)
1363 new_adv |= ADVERTISE_1000XFULL;
1365 new_adv |= bnx2_phy_get_pause_adv(bp);
1367 bnx2_read_phy(bp, bp->mii_adv, &adv);
1368 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1370 bp->serdes_an_pending = 0;
1371 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1372 /* Force a link down visible on the other side */
1374 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1375 spin_unlock_bh(&bp->phy_lock);
1377 spin_lock_bh(&bp->phy_lock);
1380 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1381 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1383 /* Speed up link-up time when the link partner
1384 * does not autonegotiate, which is very common
1385 * in blade servers. Some blade servers use
1386 * IPMI for keyboard input, and it's important
1387 * to minimize link disruptions. Autoneg. involves
1388 * exchanging base pages plus 3 next pages and
1389 * normally completes in about 120 msec.
1391 bp->current_interval = SERDES_AN_TIMEOUT;
1392 bp->serdes_an_pending = 1;
1393 mod_timer(&bp->timer, jiffies + bp->current_interval);
1395 bnx2_resolve_flow_ctrl(bp);
1396 bnx2_set_mac_link(bp);
1402 #define ETHTOOL_ALL_FIBRE_SPEED \
1403 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1404 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1405 (ADVERTISED_1000baseT_Full)
1407 #define ETHTOOL_ALL_COPPER_SPEED \
1408 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1409 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1410 ADVERTISED_1000baseT_Full)
1412 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1413 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1415 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1418 bnx2_set_default_remote_link(struct bnx2 *bp)
1422 if (bp->phy_port == PORT_TP)
1423 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1425 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1427 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1428 bp->req_line_speed = 0;
1429 bp->autoneg |= AUTONEG_SPEED;
1430 bp->advertising = ADVERTISED_Autoneg;
1431 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1432 bp->advertising |= ADVERTISED_10baseT_Half;
1433 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1434 bp->advertising |= ADVERTISED_10baseT_Full;
1435 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1436 bp->advertising |= ADVERTISED_100baseT_Half;
1437 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1438 bp->advertising |= ADVERTISED_100baseT_Full;
1439 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1440 bp->advertising |= ADVERTISED_1000baseT_Full;
1441 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1442 bp->advertising |= ADVERTISED_2500baseX_Full;
1445 bp->advertising = 0;
1446 bp->req_duplex = DUPLEX_FULL;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1448 bp->req_line_speed = SPEED_10;
1449 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1450 bp->req_duplex = DUPLEX_HALF;
1452 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1453 bp->req_line_speed = SPEED_100;
1454 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1455 bp->req_duplex = DUPLEX_HALF;
1457 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1458 bp->req_line_speed = SPEED_1000;
1459 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1460 bp->req_line_speed = SPEED_2500;
1465 bnx2_set_default_link(struct bnx2 *bp)
1467 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1468 return bnx2_set_default_remote_link(bp);
1470 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1471 bp->req_line_speed = 0;
1472 if (bp->phy_flags & PHY_SERDES_FLAG) {
1475 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1477 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1478 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1479 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1481 bp->req_line_speed = bp->line_speed = SPEED_1000;
1482 bp->req_duplex = DUPLEX_FULL;
1485 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1489 bnx2_send_heart_beat(struct bnx2 *bp)
1494 spin_lock(&bp->indirect_lock);
1495 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1496 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1497 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1498 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1499 spin_unlock(&bp->indirect_lock);
1503 bnx2_remote_phy_event(struct bnx2 *bp)
1506 u8 link_up = bp->link_up;
1509 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1511 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1512 bnx2_send_heart_beat(bp);
1514 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1516 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1522 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1523 bp->duplex = DUPLEX_FULL;
1525 case BNX2_LINK_STATUS_10HALF:
1526 bp->duplex = DUPLEX_HALF;
1527 case BNX2_LINK_STATUS_10FULL:
1528 bp->line_speed = SPEED_10;
1530 case BNX2_LINK_STATUS_100HALF:
1531 bp->duplex = DUPLEX_HALF;
1532 case BNX2_LINK_STATUS_100BASE_T4:
1533 case BNX2_LINK_STATUS_100FULL:
1534 bp->line_speed = SPEED_100;
1536 case BNX2_LINK_STATUS_1000HALF:
1537 bp->duplex = DUPLEX_HALF;
1538 case BNX2_LINK_STATUS_1000FULL:
1539 bp->line_speed = SPEED_1000;
1541 case BNX2_LINK_STATUS_2500HALF:
1542 bp->duplex = DUPLEX_HALF;
1543 case BNX2_LINK_STATUS_2500FULL:
1544 bp->line_speed = SPEED_2500;
1551 spin_lock(&bp->phy_lock);
1553 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1554 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1555 if (bp->duplex == DUPLEX_FULL)
1556 bp->flow_ctrl = bp->req_flow_ctrl;
1558 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1559 bp->flow_ctrl |= FLOW_CTRL_TX;
1560 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1561 bp->flow_ctrl |= FLOW_CTRL_RX;
1564 old_port = bp->phy_port;
1565 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1566 bp->phy_port = PORT_FIBRE;
1568 bp->phy_port = PORT_TP;
1570 if (old_port != bp->phy_port)
1571 bnx2_set_default_link(bp);
1573 spin_unlock(&bp->phy_lock);
1575 if (bp->link_up != link_up)
1576 bnx2_report_link(bp);
1578 bnx2_set_mac_link(bp);
1582 bnx2_set_remote_link(struct bnx2 *bp)
1586 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1588 case BNX2_FW_EVT_CODE_LINK_EVENT:
1589 bnx2_remote_phy_event(bp);
1591 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1593 bnx2_send_heart_beat(bp);
1600 bnx2_setup_copper_phy(struct bnx2 *bp)
1605 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1607 if (bp->autoneg & AUTONEG_SPEED) {
1608 u32 adv_reg, adv1000_reg;
1609 u32 new_adv_reg = 0;
1610 u32 new_adv1000_reg = 0;
1612 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1613 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1614 ADVERTISE_PAUSE_ASYM);
1616 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1617 adv1000_reg &= PHY_ALL_1000_SPEED;
1619 if (bp->advertising & ADVERTISED_10baseT_Half)
1620 new_adv_reg |= ADVERTISE_10HALF;
1621 if (bp->advertising & ADVERTISED_10baseT_Full)
1622 new_adv_reg |= ADVERTISE_10FULL;
1623 if (bp->advertising & ADVERTISED_100baseT_Half)
1624 new_adv_reg |= ADVERTISE_100HALF;
1625 if (bp->advertising & ADVERTISED_100baseT_Full)
1626 new_adv_reg |= ADVERTISE_100FULL;
1627 if (bp->advertising & ADVERTISED_1000baseT_Full)
1628 new_adv1000_reg |= ADVERTISE_1000FULL;
1630 new_adv_reg |= ADVERTISE_CSMA;
1632 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1634 if ((adv1000_reg != new_adv1000_reg) ||
1635 (adv_reg != new_adv_reg) ||
1636 ((bmcr & BMCR_ANENABLE) == 0)) {
1638 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1639 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1640 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1643 else if (bp->link_up) {
1644 /* Flow ctrl may have changed from auto to forced */
1645 /* or vice-versa. */
1647 bnx2_resolve_flow_ctrl(bp);
1648 bnx2_set_mac_link(bp);
1654 if (bp->req_line_speed == SPEED_100) {
1655 new_bmcr |= BMCR_SPEED100;
1657 if (bp->req_duplex == DUPLEX_FULL) {
1658 new_bmcr |= BMCR_FULLDPLX;
1660 if (new_bmcr != bmcr) {
1663 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1664 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1666 if (bmsr & BMSR_LSTATUS) {
1667 /* Force link down */
1668 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1669 spin_unlock_bh(&bp->phy_lock);
1671 spin_lock_bh(&bp->phy_lock);
1673 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1674 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1677 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1679 /* Normally, the new speed is setup after the link has
1680 * gone down and up again. In some cases, link will not go
1681 * down so we need to set up the new speed here.
1683 if (bmsr & BMSR_LSTATUS) {
1684 bp->line_speed = bp->req_line_speed;
1685 bp->duplex = bp->req_duplex;
1686 bnx2_resolve_flow_ctrl(bp);
1687 bnx2_set_mac_link(bp);
1690 bnx2_resolve_flow_ctrl(bp);
1691 bnx2_set_mac_link(bp);
1697 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1699 if (bp->loopback == MAC_LOOPBACK)
1702 if (bp->phy_flags & PHY_SERDES_FLAG) {
1703 return (bnx2_setup_serdes_phy(bp, port));
1706 return (bnx2_setup_copper_phy(bp));
1711 bnx2_init_5709s_phy(struct bnx2 *bp)
1715 bp->mii_bmcr = MII_BMCR + 0x10;
1716 bp->mii_bmsr = MII_BMSR + 0x10;
1717 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1718 bp->mii_adv = MII_ADVERTISE + 0x10;
1719 bp->mii_lpa = MII_LPA + 0x10;
1720 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1722 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1723 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1725 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1728 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1730 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1731 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1732 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1733 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1736 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1737 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1738 val |= BCM5708S_UP1_2G5;
1740 val &= ~BCM5708S_UP1_2G5;
1741 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1743 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1744 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1745 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1746 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1748 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1750 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1751 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1752 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1754 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1760 bnx2_init_5708s_phy(struct bnx2 *bp)
1766 bp->mii_up1 = BCM5708S_UP1;
1768 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1769 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1770 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1772 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1773 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1774 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1776 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1777 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1778 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1780 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1781 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1782 val |= BCM5708S_UP1_2G5;
1783 bnx2_write_phy(bp, BCM5708S_UP1, val);
1786 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1787 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1788 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1789 /* increase tx signal amplitude */
1790 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1791 BCM5708S_BLK_ADDR_TX_MISC);
1792 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1793 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1794 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1795 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1798 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1799 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1804 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1805 BNX2_SHARED_HW_CFG_CONFIG);
1806 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1807 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1808 BCM5708S_BLK_ADDR_TX_MISC);
1809 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1810 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1811 BCM5708S_BLK_ADDR_DIG);
1818 bnx2_init_5706s_phy(struct bnx2 *bp)
1822 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1824 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1825 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1827 if (bp->dev->mtu > 1500) {
1830 /* Set extended packet length bit */
1831 bnx2_write_phy(bp, 0x18, 0x7);
1832 bnx2_read_phy(bp, 0x18, &val);
1833 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1835 bnx2_write_phy(bp, 0x1c, 0x6c00);
1836 bnx2_read_phy(bp, 0x1c, &val);
1837 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1842 bnx2_write_phy(bp, 0x18, 0x7);
1843 bnx2_read_phy(bp, 0x18, &val);
1844 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1846 bnx2_write_phy(bp, 0x1c, 0x6c00);
1847 bnx2_read_phy(bp, 0x1c, &val);
1848 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1855 bnx2_init_copper_phy(struct bnx2 *bp)
1861 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1862 bnx2_write_phy(bp, 0x18, 0x0c00);
1863 bnx2_write_phy(bp, 0x17, 0x000a);
1864 bnx2_write_phy(bp, 0x15, 0x310b);
1865 bnx2_write_phy(bp, 0x17, 0x201f);
1866 bnx2_write_phy(bp, 0x15, 0x9506);
1867 bnx2_write_phy(bp, 0x17, 0x401f);
1868 bnx2_write_phy(bp, 0x15, 0x14e2);
1869 bnx2_write_phy(bp, 0x18, 0x0400);
1872 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1873 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1874 MII_BNX2_DSP_EXPAND_REG | 0x8);
1875 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1877 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1880 if (bp->dev->mtu > 1500) {
1881 /* Set extended packet length bit */
1882 bnx2_write_phy(bp, 0x18, 0x7);
1883 bnx2_read_phy(bp, 0x18, &val);
1884 bnx2_write_phy(bp, 0x18, val | 0x4000);
1886 bnx2_read_phy(bp, 0x10, &val);
1887 bnx2_write_phy(bp, 0x10, val | 0x1);
1890 bnx2_write_phy(bp, 0x18, 0x7);
1891 bnx2_read_phy(bp, 0x18, &val);
1892 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1894 bnx2_read_phy(bp, 0x10, &val);
1895 bnx2_write_phy(bp, 0x10, val & ~0x1);
1898 /* ethernet@wirespeed */
1899 bnx2_write_phy(bp, 0x18, 0x7007);
1900 bnx2_read_phy(bp, 0x18, &val);
1901 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1907 bnx2_init_phy(struct bnx2 *bp)
1912 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1913 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1915 bp->mii_bmcr = MII_BMCR;
1916 bp->mii_bmsr = MII_BMSR;
1917 bp->mii_bmsr1 = MII_BMSR;
1918 bp->mii_adv = MII_ADVERTISE;
1919 bp->mii_lpa = MII_LPA;
1921 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1923 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1926 bnx2_read_phy(bp, MII_PHYSID1, &val);
1927 bp->phy_id = val << 16;
1928 bnx2_read_phy(bp, MII_PHYSID2, &val);
1929 bp->phy_id |= val & 0xffff;
1931 if (bp->phy_flags & PHY_SERDES_FLAG) {
1932 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1933 rc = bnx2_init_5706s_phy(bp);
1934 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1935 rc = bnx2_init_5708s_phy(bp);
1936 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1937 rc = bnx2_init_5709s_phy(bp);
1940 rc = bnx2_init_copper_phy(bp);
1945 rc = bnx2_setup_phy(bp, bp->phy_port);
1951 bnx2_set_mac_loopback(struct bnx2 *bp)
1955 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1956 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1957 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1958 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1963 static int bnx2_test_link(struct bnx2 *);
1966 bnx2_set_phy_loopback(struct bnx2 *bp)
1971 spin_lock_bh(&bp->phy_lock);
1972 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1974 spin_unlock_bh(&bp->phy_lock);
1978 for (i = 0; i < 10; i++) {
1979 if (bnx2_test_link(bp) == 0)
1984 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1985 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1986 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1987 BNX2_EMAC_MODE_25G_MODE);
1989 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1990 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1996 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2002 msg_data |= bp->fw_wr_seq;
2004 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2006 /* wait for an acknowledgement. */
2007 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2010 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2012 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2015 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2018 /* If we timed out, inform the firmware that this is the case. */
2019 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2021 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2024 msg_data &= ~BNX2_DRV_MSG_CODE;
2025 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2027 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2032 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
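/* Handshake sketch: each request carries a rolling sequence number
 * (bp->fw_wr_seq) in its low bits, and the firmware echoes that
 * sequence into BNX2_FW_MB once it has consumed the command, so
 * comparing BNX2_FW_MSG_ACK against BNX2_DRV_MSG_SEQ distinguishes a
 * fresh acknowledgement from a stale one.
 */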
2039 bnx2_init_5709_context(struct bnx2 *bp)
2044 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2045 val |= (BCM_PAGE_BITS - 8) << 16;
2046 REG_WR(bp, BNX2_CTX_COMMAND, val);
2047 for (i = 0; i < 10; i++) {
2048 val = REG_RD(bp, BNX2_CTX_COMMAND);
2049 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2053 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2056 for (i = 0; i < bp->ctx_pages; i++) {
2059 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2060 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2061 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2062 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2063 (u64) bp->ctx_blk_mapping[i] >> 32);
2064 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2065 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2066 for (j = 0; j < 10; j++) {
2068 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2069 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2073 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2082 bnx2_init_context(struct bnx2 *bp)
2088 u32 vcid_addr, pcid_addr, offset;
2093 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2096 vcid_addr = GET_PCID_ADDR(vcid);
2098 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2103 pcid_addr = GET_PCID_ADDR(new_vcid);
2106 vcid_addr = GET_CID_ADDR(vcid);
2107 pcid_addr = vcid_addr;
2110 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2111 vcid_addr += (i << PHY_CTX_SHIFT);
2112 pcid_addr += (i << PHY_CTX_SHIFT);
2114 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2115 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2117 /* Zero out the context. */
2118 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2119 CTX_WR(bp, 0x00, offset, 0);
2121 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2122 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2128 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2134 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2135 if (good_mbuf == NULL) {
2136 printk(KERN_ERR PFX "Failed to allocate memory in "
2137 "bnx2_alloc_bad_rbuf\n");
2141 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2142 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2146 /* Allocate a bunch of mbufs and save the good ones in an array. */
2147 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2148 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2149 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2151 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2153 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2155 /* The addresses with Bit 9 set are bad memory blocks. */
2156 if (!(val & (1 << 9))) {
2157 good_mbuf[good_mbuf_cnt] = (u16) val;
2161 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2164 /* Free the good ones back to the mbuf pool, thus discarding
2165 * all the bad ones. */
2166 while (good_mbuf_cnt) {
2169 val = good_mbuf[good_mbuf_cnt];
2170 val = (val << 9) | val | 1;
2172 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
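/* The free command re-encodes the same buffer handle twice,
 * (val << 9) | val, plus a valid bit in bit 0.  Only handles that
 * came back with bit 9 clear are freed here, so the bad memory
 * blocks flagged by bit 9 stay allocated and permanently out of
 * circulation.
 */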
2179 bnx2_set_mac_addr(struct bnx2 *bp)
2182 u8 *mac_addr = bp->dev->dev_addr;
2184 val = (mac_addr[0] << 8) | mac_addr[1];
2186 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2188 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2189 (mac_addr[4] << 8) | mac_addr[5];
2191 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
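/* Packing example (illustrative address 00:10:18:5a:2e:9c): MATCH0
 * gets the top two octets, 0x00000010, and MATCH1 the remaining
 * four, 0x185a2e9c.
 */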
2195 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2197 struct sk_buff *skb;
2198 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2200 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2201 unsigned long align;
2203 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2208 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2209 skb_reserve(skb, BNX2_RX_ALIGN - align);
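/* If skb->data is not already BNX2_RX_ALIGN-aligned, reserving the
 * complement (BNX2_RX_ALIGN - align) advances it to the next
 * boundary; an already-aligned buffer skips the reserve entirely.
 */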
2211 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2212 PCI_DMA_FROMDEVICE);
2215 pci_unmap_addr_set(rx_buf, mapping, mapping);
2217 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2218 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2220 bp->rx_prod_bseq += bp->rx_buf_use_size;
2226 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2228 struct status_block *sblk = bp->status_blk;
2229 u32 new_link_state, old_link_state;
2232 new_link_state = sblk->status_attn_bits & event;
2233 old_link_state = sblk->status_attn_bits_ack & event;
2234 if (new_link_state != old_link_state) {
2236 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2238 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
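/* The attention bit and its "ack" shadow differ only while an event
 * is pending; writing STATUS_BIT_SET_CMD or STATUS_BIT_CLEAR_CMD
 * brings the ack bit in line with the new state, re-arming detection
 * of the next toggle.
 */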
2246 bnx2_phy_int(struct bnx2 *bp)
2248 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2249 spin_lock(&bp->phy_lock);
2251 spin_unlock(&bp->phy_lock);
2253 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2254 bnx2_set_remote_link(bp);
2259 bnx2_tx_int(struct bnx2 *bp)
2261 struct status_block *sblk = bp->status_blk;
2262 u16 hw_cons, sw_cons, sw_ring_cons;
2265 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2266 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2269 sw_cons = bp->tx_cons;
2271 while (sw_cons != hw_cons) {
2272 struct sw_bd *tx_buf;
2273 struct sk_buff *skb;
2276 sw_ring_cons = TX_RING_IDX(sw_cons);
2278 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2281 /* partial BD completions possible with TSO packets */
2282 if (skb_is_gso(skb)) {
2283 u16 last_idx, last_ring_idx;
2285 last_idx = sw_cons +
2286 skb_shinfo(skb)->nr_frags + 1;
2287 last_ring_idx = sw_ring_cons +
2288 skb_shinfo(skb)->nr_frags + 1;
2289 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2292 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2297 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2298 skb_headlen(skb), PCI_DMA_TODEVICE);
2301 last = skb_shinfo(skb)->nr_frags;
2303 for (i = 0; i < last; i++) {
2304 sw_cons = NEXT_TX_BD(sw_cons);
2306 pci_unmap_page(bp->pdev,
2308 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2310 skb_shinfo(skb)->frags[i].size,
2314 sw_cons = NEXT_TX_BD(sw_cons);
2316 tx_free_bd += last + 1;
2320 hw_cons = bp->hw_tx_cons =
2321 sblk->status_tx_quick_consumer_index0;
2323 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2328 bp->tx_cons = sw_cons;
2329 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2330 * before checking for netif_queue_stopped(). Without the
2331 * memory barrier, there is a small possibility that bnx2_start_xmit()
2332 * will miss it and cause the queue to be stopped forever.
2336 if (unlikely(netif_queue_stopped(bp->dev)) &&
2337 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2338 netif_tx_lock(bp->dev);
2339 if ((netif_queue_stopped(bp->dev)) &&
2340 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2341 netif_wake_queue(bp->dev);
2342 netif_tx_unlock(bp->dev);
2347 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2350 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2351 struct rx_bd *cons_bd, *prod_bd;
2353 cons_rx_buf = &bp->rx_buf_ring[cons];
2354 prod_rx_buf = &bp->rx_buf_ring[prod];
2356 pci_dma_sync_single_for_device(bp->pdev,
2357 pci_unmap_addr(cons_rx_buf, mapping),
2358 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2360 bp->rx_prod_bseq += bp->rx_buf_use_size;
2362 prod_rx_buf->skb = skb;
2367 pci_unmap_addr_set(prod_rx_buf, mapping,
2368 pci_unmap_addr(cons_rx_buf, mapping));
2370 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2371 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2372 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2373 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2377 bnx2_rx_int(struct bnx2 *bp, int budget)
2379 struct status_block *sblk = bp->status_blk;
2380 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2381 struct l2_fhdr *rx_hdr;
2384 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2385 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2388 sw_cons = bp->rx_cons;
2389 sw_prod = bp->rx_prod;
2391 /* Memory barrier necessary as speculative reads of the rx
2392 * buffer can be ahead of the index in the status block
2395 while (sw_cons != hw_cons) {
2398 struct sw_bd *rx_buf;
2399 struct sk_buff *skb;
2400 dma_addr_t dma_addr;
2402 sw_ring_cons = RX_RING_IDX(sw_cons);
2403 sw_ring_prod = RX_RING_IDX(sw_prod);
2405 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2410 dma_addr = pci_unmap_addr(rx_buf, mapping);
2412 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2413 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2415 rx_hdr = (struct l2_fhdr *) skb->data;
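/* l2_fhdr_pkt_len includes the 4-byte Ethernet FCS left on the frame
 * by the hardware; subtract it so len is the frame length passed up
 * the stack. */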
2416 len = rx_hdr->l2_fhdr_pkt_len - 4;
2418 if ((status = rx_hdr->l2_fhdr_status) &
2419 (L2_FHDR_ERRORS_BAD_CRC |
2420 L2_FHDR_ERRORS_PHY_DECODE |
2421 L2_FHDR_ERRORS_ALIGNMENT |
2422 L2_FHDR_ERRORS_TOO_SHORT |
2423 L2_FHDR_ERRORS_GIANT_FRAME)) {
2428 /* Since we don't have a jumbo ring, copy small packets
2431 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2432 struct sk_buff *new_skb;
2434 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2435 if (new_skb == NULL)
2439 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2440 new_skb->data, len + 2);
2441 skb_reserve(new_skb, 2);
2442 skb_put(new_skb, len);
2444 bnx2_reuse_rx_skb(bp, skb,
2445 sw_ring_cons, sw_ring_prod);
2449 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2450 pci_unmap_single(bp->pdev, dma_addr,
2451 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2453 skb_reserve(skb, bp->rx_offset);
2458 bnx2_reuse_rx_skb(bp, skb,
2459 sw_ring_cons, sw_ring_prod);
2463 skb->protocol = eth_type_trans(skb, bp->dev);
2465 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2466 (ntohs(skb->protocol) != ETH_P_8021Q)) {
2473 skb->ip_summed = CHECKSUM_NONE;
2475 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2476 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2478 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2479 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2480 skb->ip_summed = CHECKSUM_UNNECESSARY;
2484 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2485 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2486 rx_hdr->l2_fhdr_vlan_tag);
2490 netif_receive_skb(skb);
2492 bp->dev->last_rx = jiffies;
2496 sw_cons = NEXT_RX_BD(sw_cons);
2497 sw_prod = NEXT_RX_BD(sw_prod);
2499 if (rx_pkt == budget)
2502 /* Refresh hw_cons to see if there is new work */
2503 if (sw_cons == hw_cons) {
2504 hw_cons = bp->hw_rx_cons =
2505 sblk->status_rx_quick_consumer_index0;
2506 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2511 bp->rx_cons = sw_cons;
2512 bp->rx_prod = sw_prod;
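/* Publish the new host producer index and byte sequence through the
 * chip's rx context mailbox so it can DMA into the refilled buffers.
 */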
2514 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2516 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2524 /* MSI ISR - The only difference between this and the INTx ISR
2525 * is that the MSI interrupt is always serviced.
2526 */
2528 bnx2_msi(int irq, void *dev_instance)
2530 struct net_device *dev = dev_instance;
2531 struct bnx2 *bp = netdev_priv(dev);
2533 prefetch(bp->status_blk);
2534 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2535 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2536 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2538 /* Return here if interrupt is disabled. */
2539 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2542 netif_rx_schedule(dev);
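/* One-shot MSI variant: the host coalescing block disarms itself after
 * each message, so no mask/ack register write is needed before
 * scheduling NAPI.
 */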
2548 bnx2_msi_1shot(int irq, void *dev_instance)
2550 struct net_device *dev = dev_instance;
2551 struct bnx2 *bp = netdev_priv(dev);
2553 prefetch(bp->status_blk);
2555 /* Return here if interrupt is disabled. */
2556 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2559 netif_rx_schedule(dev);
2565 bnx2_interrupt(int irq, void *dev_instance)
2567 struct net_device *dev = dev_instance;
2568 struct bnx2 *bp = netdev_priv(dev);
2569 struct status_block *sblk = bp->status_blk;
2571 /* When using INTx, it is possible for the interrupt to arrive
2572 * at the CPU before the status block posted prior to the
2573 * interrupt. Reading a register will flush the status block.
2574 * When using MSI, the MSI message will always complete after
2575 * the status block write.
2576 */
2577 if ((sblk->status_idx == bp->last_status_idx) &&
2578 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2579 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2582 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2583 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2584 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2586 /* Read back to deassert IRQ immediately to avoid too many
2587 * spurious interrupts.
2588 */
2589 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2591 /* Return here if interrupt is shared and is disabled. */
2592 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2595 if (netif_rx_schedule_prep(dev)) {
2596 bp->last_status_idx = sblk->status_idx;
2597 __netif_rx_schedule(dev);
2603 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2604 STATUS_ATTN_BITS_TIMER_ABORT)
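/* There is work to do if the tx or rx consumer index in the status
 * block has moved past our cached copy, or if an attention event
 * (link change or timer abort) has not been acknowledged yet.
 */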
2607 bnx2_has_work(struct bnx2 *bp)
2609 struct status_block *sblk = bp->status_blk;
2611 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2612 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2615 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2616 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2623 bnx2_poll(struct net_device *dev, int *budget)
2625 struct bnx2 *bp = netdev_priv(dev);
2626 struct status_block *sblk = bp->status_blk;
2627 u32 status_attn_bits = sblk->status_attn_bits;
2628 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2630 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2631 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2635 /* This is needed to take care of transient status
2636 * during link changes.
2637 */
2638 REG_WR(bp, BNX2_HC_COMMAND,
2639 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2640 REG_RD(bp, BNX2_HC_COMMAND);
2643 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2646 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2647 int orig_budget = *budget;
2650 if (orig_budget > dev->quota)
2651 orig_budget = dev->quota;
2653 work_done = bnx2_rx_int(bp, orig_budget);
2654 *budget -= work_done;
2655 dev->quota -= work_done;
2658 bp->last_status_idx = bp->status_blk->status_idx;
2661 if (!bnx2_has_work(bp)) {
2662 netif_rx_complete(dev);
2663 if (likely(bp->flags & USING_MSI_FLAG)) {
2664 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2665 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2666 bp->last_status_idx);
2669 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2670 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2671 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2672 bp->last_status_idx);
2674 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2675 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2676 bp->last_status_idx);
2683 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2684 * from set_multicast.
2685 */
2687 bnx2_set_rx_mode(struct net_device *dev)
2689 struct bnx2 *bp = netdev_priv(dev);
2690 u32 rx_mode, sort_mode;
2693 spin_lock_bh(&bp->phy_lock);
2695 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2696 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2697 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2698 #ifdef BCM_VLAN
2699 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2700 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2701 #else
2702 if (!(bp->flags & ASF_ENABLE_FLAG))
2703 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2704 #endif
2705 if (dev->flags & IFF_PROMISC) {
2706 /* Promiscuous mode. */
2707 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2708 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2709 BNX2_RPM_SORT_USER0_PROM_VLAN;
2711 else if (dev->flags & IFF_ALLMULTI) {
2712 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2713 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2716 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2719 /* Accept one or more multicast(s). */
2720 struct dev_mc_list *mclist;
2721 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2726 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2728 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2729 i++, mclist = mclist->next) {
2731 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2732 bit = crc & 0xff;
2733 regidx = (bit & 0xe0) >> 5;
2734 bit &= 0x1f;
2735 mc_filter[regidx] |= (1 << bit);
2738 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2739 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2743 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2746 if (rx_mode != bp->rx_mode) {
2747 bp->rx_mode = rx_mode;
2748 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2751 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2752 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2753 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2755 spin_unlock_bh(&bp->phy_lock);
2758 #define FW_BUF_SIZE 0x8000
2761 bnx2_gunzip_init(struct bnx2 *bp)
2763 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2766 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2769 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2770 if (bp->strm->workspace == NULL)
2780 vfree(bp->gunzip_buf);
2781 bp->gunzip_buf = NULL;
2784 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2785 "uncompression.\n", bp->dev->name);
2790 bnx2_gunzip_end(struct bnx2 *bp)
2792 kfree(bp->strm->workspace);
2797 if (bp->gunzip_buf) {
2798 vfree(bp->gunzip_buf);
2799 bp->gunzip_buf = NULL;
2804 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2808 /* check gzip header */
2809 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2810 return -EINVAL;
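/* Per RFC 1952 the fixed gzip header is 10 bytes; if the FNAME flag
 * is set, a NUL-terminated original file name follows and must be
 * skipped before handing the raw deflate stream to zlib.
 */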
2815 if (zbuf[3] & FNAME)
2816 while ((zbuf[n++] != 0) && (n < len));
2818 bp->strm->next_in = zbuf + n;
2819 bp->strm->avail_in = len - n;
2820 bp->strm->next_out = bp->gunzip_buf;
2821 bp->strm->avail_out = FW_BUF_SIZE;
2823 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2827 rc = zlib_inflate(bp->strm, Z_FINISH);
2829 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2830 *outbuf = bp->gunzip_buf;
2832 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2833 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2834 bp->dev->name, bp->strm->msg);
2836 zlib_inflateEnd(bp->strm);
2838 if (rc == Z_STREAM_END)
2839 return 0;
2841 return rc;
2845 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2852 for (i = 0; i < rv2p_code_len; i += 8) {
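/* Each RV2P instruction is 64 bits wide: write it as a high/low dword
 * pair, then commit it at instruction index i / 8 through the
 * processor's address/command register.
 */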
2853 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2855 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2858 if (rv2p_proc == RV2P_PROC1) {
2859 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2860 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2863 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2864 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2868 /* Reset the processor, un-stall is done later. */
2869 if (rv2p_proc == RV2P_PROC1) {
2870 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2873 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
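/* load_cpu_fw() below follows the same sequence for every on-chip CPU:
 * halt the processor, copy the text/data/sbss/bss/rodata sections into
 * its scratchpad view, point the PC at the entry address, then un-halt.
 */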
2878 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2885 val = REG_RD_IND(bp, cpu_reg->mode);
2886 val |= cpu_reg->mode_value_halt;
2887 REG_WR_IND(bp, cpu_reg->mode, val);
2888 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2890 /* Load the Text area. */
2891 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2896 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2897 &text_len);
2906 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2907 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2911 /* Load the Data area. */
2912 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2916 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2917 REG_WR_IND(bp, offset, fw->data[j]);
2921 /* Load the SBSS area. */
2922 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2926 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2927 REG_WR_IND(bp, offset, fw->sbss[j]);
2931 /* Load the BSS area. */
2932 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2936 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2937 REG_WR_IND(bp, offset, fw->bss[j]);
2941 /* Load the Read-Only area. */
2942 offset = cpu_reg->spad_base +
2943 (fw->rodata_addr - cpu_reg->mips_view_base);
2947 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2948 REG_WR_IND(bp, offset, fw->rodata[j]);
2952 /* Clear the pre-fetch instruction. */
2953 REG_WR_IND(bp, cpu_reg->inst, 0);
2954 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2956 /* Start the CPU. */
2957 val = REG_RD_IND(bp, cpu_reg->mode);
2958 val &= ~cpu_reg->mode_value_halt;
2959 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2960 REG_WR_IND(bp, cpu_reg->mode, val);
2966 bnx2_init_cpus(struct bnx2 *bp)
2968 struct cpu_reg cpu_reg;
2974 if ((rc = bnx2_gunzip_init(bp)) != 0)
2977 /* Initialize the RV2P processor. */
2978 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2979 &text_len);
2983 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2985 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2986 &text_len);
2990 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
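/* The five on-chip RISC engines (RXP, TXP, TPAT, COM, CP) share one
 * register layout; only the base addresses and firmware images below
 * differ from block to block.
 */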
2992 /* Initialize the RX Processor. */
2993 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2994 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2995 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2996 cpu_reg.state = BNX2_RXP_CPU_STATE;
2997 cpu_reg.state_value_clear = 0xffffff;
2998 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2999 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3000 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3001 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3002 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3003 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3004 cpu_reg.mips_view_base = 0x8000000;
3006 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3007 fw = &bnx2_rxp_fw_09;
3009 fw = &bnx2_rxp_fw_06;
3011 rc = load_cpu_fw(bp, &cpu_reg, fw);
3015 /* Initialize the TX Processor. */
3016 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3017 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3018 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3019 cpu_reg.state = BNX2_TXP_CPU_STATE;
3020 cpu_reg.state_value_clear = 0xffffff;
3021 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3022 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3023 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3024 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3025 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3026 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3027 cpu_reg.mips_view_base = 0x8000000;
3029 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3030 fw = &bnx2_txp_fw_09;
3032 fw = &bnx2_txp_fw_06;
3034 rc = load_cpu_fw(bp, &cpu_reg, fw);
3038 /* Initialize the TX Patch-up Processor. */
3039 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3040 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3041 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3042 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3043 cpu_reg.state_value_clear = 0xffffff;
3044 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3045 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3046 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3047 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3048 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3049 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3050 cpu_reg.mips_view_base = 0x8000000;
3052 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3053 fw = &bnx2_tpat_fw_09;
3055 fw = &bnx2_tpat_fw_06;
3057 rc = load_cpu_fw(bp, &cpu_reg, fw);
3061 /* Initialize the Completion Processor. */
3062 cpu_reg.mode = BNX2_COM_CPU_MODE;
3063 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3064 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3065 cpu_reg.state = BNX2_COM_CPU_STATE;
3066 cpu_reg.state_value_clear = 0xffffff;
3067 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3068 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3069 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3070 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3071 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3072 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3073 cpu_reg.mips_view_base = 0x8000000;
3075 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3076 fw = &bnx2_com_fw_09;
3078 fw = &bnx2_com_fw_06;
3080 rc = load_cpu_fw(bp, &cpu_reg, fw);
3084 /* Initialize the Command Processor. */
3085 cpu_reg.mode = BNX2_CP_CPU_MODE;
3086 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3087 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3088 cpu_reg.state = BNX2_CP_CPU_STATE;
3089 cpu_reg.state_value_clear = 0xffffff;
3090 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3091 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3092 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3093 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3094 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3095 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3096 cpu_reg.mips_view_base = 0x8000000;
3098 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3099 fw = &bnx2_cp_fw_09;
3101 rc = load_cpu_fw(bp, &cpu_reg, fw);
3106 bnx2_gunzip_end(bp);
3111 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3115 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3121 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3122 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3123 PCI_PM_CTRL_PME_STATUS);
3125 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3126 /* delay required during transition out of D3hot */
3127 msleep(20);
3129 val = REG_RD(bp, BNX2_EMAC_MODE);
3130 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3131 val &= ~BNX2_EMAC_MODE_MPKT;
3132 REG_WR(bp, BNX2_EMAC_MODE, val);
3134 val = REG_RD(bp, BNX2_RPM_CONFIG);
3135 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3136 REG_WR(bp, BNX2_RPM_CONFIG, val);
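/* When arming Wake-on-LAN, renegotiate a 10/100 link below: a
 * low-speed link is all the wake logic needs, and it keeps power
 * consumption down while the device sits in D3.
 */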
3147 autoneg = bp->autoneg;
3148 advertising = bp->advertising;
3150 bp->autoneg = AUTONEG_SPEED;
3151 bp->advertising = ADVERTISED_10baseT_Half |
3152 ADVERTISED_10baseT_Full |
3153 ADVERTISED_100baseT_Half |
3154 ADVERTISED_100baseT_Full |
3155 ADVERTISED_Autoneg;
3157 bnx2_setup_copper_phy(bp);
3159 bp->autoneg = autoneg;
3160 bp->advertising = advertising;
3162 bnx2_set_mac_addr(bp);
3164 val = REG_RD(bp, BNX2_EMAC_MODE);
3166 /* Enable port mode. */
3167 val &= ~BNX2_EMAC_MODE_PORT;
3168 val |= BNX2_EMAC_MODE_PORT_MII |
3169 BNX2_EMAC_MODE_MPKT_RCVD |
3170 BNX2_EMAC_MODE_ACPI_RCVD |
3171 BNX2_EMAC_MODE_MPKT;
3173 REG_WR(bp, BNX2_EMAC_MODE, val);
3175 /* receive all multicast */
3176 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3177 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3180 REG_WR(bp, BNX2_EMAC_RX_MODE,
3181 BNX2_EMAC_RX_MODE_SORT_MODE);
3183 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3184 BNX2_RPM_SORT_USER0_MC_EN;
3185 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3186 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3187 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3188 BNX2_RPM_SORT_USER0_ENA);
3190 /* Need to enable EMAC and RPM for WOL. */
3191 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3192 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3193 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3194 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3196 val = REG_RD(bp, BNX2_RPM_CONFIG);
3197 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3198 REG_WR(bp, BNX2_RPM_CONFIG, val);
3200 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3203 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3206 if (!(bp->flags & NO_WOL_FLAG))
3207 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3209 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3210 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3211 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3220 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3222 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3225 /* No more memory access after this point until
3226 * device is brought back to D0.
3227 */
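/* NVRAM is shared with the management firmware, so access is
 * arbitrated through the SW_ARB register in the functions below: set
 * request bit 2 and poll for the matching grant, or clear it to
 * release the interface.
 */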
3238 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3243 /* Request access to the flash interface. */
3244 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3245 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3246 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3247 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3253 if (j >= NVRAM_TIMEOUT_COUNT)
3260 bnx2_release_nvram_lock(struct bnx2 *bp)
3265 /* Relinquish nvram interface. */
3266 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3268 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3269 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3270 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3276 if (j >= NVRAM_TIMEOUT_COUNT)
3284 bnx2_enable_nvram_write(struct bnx2 *bp)
3288 val = REG_RD(bp, BNX2_MISC_CFG);
3289 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3291 if (!bp->flash_info->buffered) {
3294 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3295 REG_WR(bp, BNX2_NVM_COMMAND,
3296 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3298 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3301 val = REG_RD(bp, BNX2_NVM_COMMAND);
3302 if (val & BNX2_NVM_COMMAND_DONE)
3306 if (j >= NVRAM_TIMEOUT_COUNT)
3313 bnx2_disable_nvram_write(struct bnx2 *bp)
3317 val = REG_RD(bp, BNX2_MISC_CFG);
3318 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3323 bnx2_enable_nvram_access(struct bnx2 *bp)
3327 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3328 /* Enable both bits, even on read. */
3329 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3330 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3334 bnx2_disable_nvram_access(struct bnx2 *bp)
3338 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3339 /* Disable both bits, even after read. */
3340 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3341 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3342 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3346 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3351 if (bp->flash_info->buffered)
3352 /* Buffered flash, no erase needed */
3353 return 0;
3355 /* Build an erase command */
3356 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3357 BNX2_NVM_COMMAND_DOIT;
3359 /* Need to clear DONE bit separately. */
3360 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3362 /* Address of the NVRAM page to erase. */
3363 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3365 /* Issue an erase command. */
3366 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3368 /* Wait for completion. */
3369 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3374 val = REG_RD(bp, BNX2_NVM_COMMAND);
3375 if (val & BNX2_NVM_COMMAND_DONE)
3379 if (j >= NVRAM_TIMEOUT_COUNT)
3386 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3391 /* Build the command word. */
3392 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3394 /* Calculate an offset of a buffered flash. */
3395 if (bp->flash_info->buffered) {
3396 offset = ((offset / bp->flash_info->page_size) <<
3397 bp->flash_info->page_bits) +
3398 (offset % bp->flash_info->page_size);
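/* e.g. assuming a 264-byte page with page_bits = 9, byte offset 600
 * maps to ((600 / 264) << 9) + (600 % 264) = 1024 + 72 = 1096.
 */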
3401 /* Need to clear DONE bit separately. */
3402 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3404 /* Address of the NVRAM to read from. */
3405 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3407 /* Issue a read command. */
3408 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3410 /* Wait for completion. */
3411 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3416 val = REG_RD(bp, BNX2_NVM_COMMAND);
3417 if (val & BNX2_NVM_COMMAND_DONE) {
3418 val = REG_RD(bp, BNX2_NVM_READ);
3420 val = be32_to_cpu(val);
3421 memcpy(ret_val, &val, 4);
3425 if (j >= NVRAM_TIMEOUT_COUNT)
3433 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3438 /* Build the command word. */
3439 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3441 /* Calculate an offset of a buffered flash. */
3442 if (bp->flash_info->buffered) {
3443 offset = ((offset / bp->flash_info->page_size) <<
3444 bp->flash_info->page_bits) +
3445 (offset % bp->flash_info->page_size);
3448 /* Need to clear DONE bit separately. */
3449 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3451 memcpy(&val32, val, 4);
3452 val32 = cpu_to_be32(val32);
3454 /* Write the data. */
3455 REG_WR(bp, BNX2_NVM_WRITE, val32);
3457 /* Address of the NVRAM to write to. */
3458 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3460 /* Issue the write command. */
3461 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3463 /* Wait for completion. */
3464 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3467 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3470 if (j >= NVRAM_TIMEOUT_COUNT)
3477 bnx2_init_nvram(struct bnx2 *bp)
3480 int j, entry_count, rc;
3481 struct flash_spec *flash;
3483 /* Determine the selected interface. */
3484 val = REG_RD(bp, BNX2_NVM_CFG1);
3486 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3489 if (val & 0x40000000) {
3491 /* Flash interface has been reconfigured */
3492 for (j = 0, flash = &flash_table[0]; j < entry_count;
3494 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3495 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3496 bp->flash_info = flash;
3503 /* Not yet reconfigured */
3505 if (val & (1 << 23))
3506 mask = FLASH_BACKUP_STRAP_MASK;
3508 mask = FLASH_STRAP_MASK;
3510 for (j = 0, flash = &flash_table[0]; j < entry_count;
3513 if ((val & mask) == (flash->strapping & mask)) {
3514 bp->flash_info = flash;
3516 /* Request access to the flash interface. */
3517 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3520 /* Enable access to flash interface */
3521 bnx2_enable_nvram_access(bp);
3523 /* Reconfigure the flash interface */
3524 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3525 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3526 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3527 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3529 /* Disable access to flash interface */
3530 bnx2_disable_nvram_access(bp);
3531 bnx2_release_nvram_lock(bp);
3536 } /* if (val & 0x40000000) */
3538 if (j == entry_count) {
3539 bp->flash_info = NULL;
3540 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3544 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3545 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3547 bp->flash_size = val;
3549 bp->flash_size = bp->flash_info->total_size;
3555 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3559 u32 cmd_flags, offset32, len32, extra;
3564 /* Request access to the flash interface. */
3565 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3568 /* Enable access to flash interface */
3569 bnx2_enable_nvram_access(bp);
3582 pre_len = 4 - (offset & 3);
3584 if (pre_len >= len32) {
3586 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3587 BNX2_NVM_COMMAND_LAST;
3590 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3593 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3598 memcpy(ret_buf, buf + (offset & 3), pre_len);
3605 extra = 4 - (len32 & 3);
3606 len32 = (len32 + 4) & ~3;
3613 cmd_flags = BNX2_NVM_COMMAND_LAST;
3615 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3616 BNX2_NVM_COMMAND_LAST;
3618 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3620 memcpy(ret_buf, buf, 4 - extra);
3622 else if (len32 > 0) {
3625 /* Read the first word. */
3629 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3631 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3633 /* Advance to the next dword. */
3638 while (len32 > 4 && rc == 0) {
3639 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3641 /* Advance to the next dword. */
3650 cmd_flags = BNX2_NVM_COMMAND_LAST;
3651 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3653 memcpy(ret_buf, buf, 4 - extra);
3656 /* Disable access to flash interface */
3657 bnx2_disable_nvram_access(bp);
3659 bnx2_release_nvram_lock(bp);
3665 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3668 u32 written, offset32, len32;
3669 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3671 int align_start, align_end;
3676 align_start = align_end = 0;
3678 if ((align_start = (offset32 & 3))) {
3680 len32 += align_start;
3683 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3688 align_end = 4 - (len32 & 3);
3690 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3694 if (align_start || align_end) {
3695 align_buf = kmalloc(len32, GFP_KERNEL);
3696 if (align_buf == NULL)
3699 memcpy(align_buf, start, 4);
3702 memcpy(align_buf + len32 - 4, end, 4);
3704 memcpy(align_buf + align_start, data_buf, buf_size);
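/* align_buf now holds a dword-aligned image: the preserved leading and
 * trailing bytes read back from flash, with the caller's data spliced
 * in between.
 */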
3708 if (bp->flash_info->buffered == 0) {
3709 flash_buffer = kmalloc(264, GFP_KERNEL);
3710 if (flash_buffer == NULL) {
3712 goto nvram_write_end;
3717 while ((written < len32) && (rc == 0)) {
3718 u32 page_start, page_end, data_start, data_end;
3719 u32 addr, cmd_flags;
3722 /* Find the page_start addr */
3723 page_start = offset32 + written;
3724 page_start -= (page_start % bp->flash_info->page_size);
3725 /* Find the page_end addr */
3726 page_end = page_start + bp->flash_info->page_size;
3727 /* Find the data_start addr */
3728 data_start = (written == 0) ? offset32 : page_start;
3729 /* Find the data_end addr */
3730 data_end = (page_end > offset32 + len32) ?
3731 (offset32 + len32) : page_end;
3733 /* Request access to the flash interface. */
3734 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3735 goto nvram_write_end;
3737 /* Enable access to flash interface */
3738 bnx2_enable_nvram_access(bp);
3740 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3741 if (bp->flash_info->buffered == 0) {
3744 /* Read the whole page into the buffer
3745 * (non-buffer flash only) */
3746 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3747 if (j == (bp->flash_info->page_size - 4)) {
3748 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3750 rc = bnx2_nvram_read_dword(bp,
3756 goto nvram_write_end;
3762 /* Enable writes to flash interface (unlock write-protect) */
3763 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3764 goto nvram_write_end;
3766 /* Loop to write back the buffer data from page_start to
3767 * data_start. */
3769 if (bp->flash_info->buffered == 0) {
3770 /* Erase the page */
3771 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3772 goto nvram_write_end;
3774 /* Re-enable the write again for the actual write */
3775 bnx2_enable_nvram_write(bp);
3777 for (addr = page_start; addr < data_start;
3778 addr += 4, i += 4) {
3780 rc = bnx2_nvram_write_dword(bp, addr,
3781 &flash_buffer[i], cmd_flags);
3784 goto nvram_write_end;
3790 /* Loop to write the new data from data_start to data_end */
3791 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3792 if ((addr == page_end - 4) ||
3793 ((bp->flash_info->buffered) &&
3794 (addr == data_end - 4))) {
3796 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3798 rc = bnx2_nvram_write_dword(bp, addr, buf,
3802 goto nvram_write_end;
3808 /* Loop to write back the buffer data from data_end
3809 * to page_end. */
3810 if (bp->flash_info->buffered == 0) {
3811 for (addr = data_end; addr < page_end;
3812 addr += 4, i += 4) {
3814 if (addr == page_end - 4) {
3815 cmd_flags = BNX2_NVM_COMMAND_LAST;
3817 rc = bnx2_nvram_write_dword(bp, addr,
3818 &flash_buffer[i], cmd_flags);
3821 goto nvram_write_end;
3827 /* Disable writes to flash interface (lock write-protect) */
3828 bnx2_disable_nvram_write(bp);
3830 /* Disable access to flash interface */
3831 bnx2_disable_nvram_access(bp);
3832 bnx2_release_nvram_lock(bp);
3834 /* Increment written */
3835 written += data_end - data_start;
3839 kfree(flash_buffer);
3845 bnx2_init_remote_phy(struct bnx2 *bp)
3849 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3850 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3853 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3854 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3857 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3858 if (netif_running(bp->dev)) {
3859 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3860 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3861 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3864 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3866 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3867 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3868 bp->phy_port = PORT_FIBRE;
3870 bp->phy_port = PORT_TP;
3875 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3880 /* Wait for the current PCI transaction to complete before
3881 * issuing a reset. */
3882 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3883 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3884 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3885 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3886 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3887 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3890 /* Wait for the firmware to tell us it is ok to issue a reset. */
3891 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3893 /* Deposit a driver reset signature so the firmware knows that
3894 * this is a soft reset. */
3895 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3896 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3898 /* Do a dummy read to force the chip to complete all current transactions
3899 * before we issue a reset. */
3900 val = REG_RD(bp, BNX2_MISC_ID);
3902 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3903 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3904 REG_RD(bp, BNX2_MISC_COMMAND);
3907 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3908 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3910 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3913 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3914 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3915 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3918 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3920 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3921 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3922 msleep(20);
3926 /* Reset takes approximately 30 usec */
3927 for (i = 0; i < 10; i++) {
3928 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3929 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3930 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3935 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3936 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3937 printk(KERN_ERR PFX "Chip reset did not complete\n");
3942 /* Make sure byte swapping is properly configured. */
3943 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3944 if (val != 0x01020304) {
3945 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3949 /* Wait for the firmware to finish its initialization. */
3950 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3954 spin_lock_bh(&bp->phy_lock);
3955 bnx2_init_remote_phy(bp);
3956 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3957 bnx2_set_default_remote_link(bp);
3958 spin_unlock_bh(&bp->phy_lock);
3960 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3961 /* Adjust the voltage regulator two steps lower. The default
3962 * of this register is 0x0000000e. */
3963 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3965 /* Remove bad rbuf memory from the free pool. */
3966 rc = bnx2_alloc_bad_rbuf(bp);
3973 bnx2_init_chip(struct bnx2 *bp)
3978 /* Make sure the interrupt is not active. */
3979 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3981 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3982 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3984 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3986 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3987 DMA_READ_CHANS << 12 |
3988 DMA_WRITE_CHANS << 16;
3990 val |= (0x2 << 20) | (1 << 11);
3992 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3995 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3996 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3997 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3999 REG_WR(bp, BNX2_DMA_CONFIG, val);
4001 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4002 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4003 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4004 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4007 if (bp->flags & PCIX_FLAG) {
4010 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4012 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4013 val16 & ~PCI_X_CMD_ERO);
4016 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4017 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4018 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4019 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4021 /* Initialize context mapping and zero out the quick contexts. The
4022 * context block must have already been enabled. */
4023 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4024 rc = bnx2_init_5709_context(bp);
4028 bnx2_init_context(bp);
4030 if ((rc = bnx2_init_cpus(bp)) != 0)
4033 bnx2_init_nvram(bp);
4035 bnx2_set_mac_addr(bp);
4037 val = REG_RD(bp, BNX2_MQ_CONFIG);
4038 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4039 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4040 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4041 val |= BNX2_MQ_CONFIG_HALT_DIS;
4043 REG_WR(bp, BNX2_MQ_CONFIG, val);
4045 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4046 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4047 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4049 val = (BCM_PAGE_BITS - 8) << 24;
4050 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4052 /* Configure page size. */
4053 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4054 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4055 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4056 REG_WR(bp, BNX2_TBDR_CONFIG, val);
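/* Seed the transmit backoff random number generator from the station
 * MAC address so that colliding nodes pick different backoff slots.
 */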
4058 val = bp->mac_addr[0] +
4059 (bp->mac_addr[1] << 8) +
4060 (bp->mac_addr[2] << 16) +
4061 bp->mac_addr[3] +
4062 (bp->mac_addr[4] << 8) +
4063 (bp->mac_addr[5] << 16);
4064 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4066 /* Program the MTU. Also include 4 bytes for CRC32. */
4067 val = bp->dev->mtu + ETH_HLEN + 4;
4068 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4069 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4070 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4072 bp->last_status_idx = 0;
4073 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4075 /* Set up how to generate a link change interrupt. */
4076 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4078 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4079 (u64) bp->status_blk_mapping & 0xffffffff);
4080 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4082 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4083 (u64) bp->stats_blk_mapping & 0xffffffff);
4084 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4085 (u64) bp->stats_blk_mapping >> 32);
4087 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4088 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4090 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4091 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4093 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4094 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4096 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4098 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4100 REG_WR(bp, BNX2_HC_COM_TICKS,
4101 (bp->com_ticks_int << 16) | bp->com_ticks);
4103 REG_WR(bp, BNX2_HC_CMD_TICKS,
4104 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4106 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4107 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4109 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
4110 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4112 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4113 val = BNX2_HC_CONFIG_COLLECT_STATS;
4115 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4116 BNX2_HC_CONFIG_COLLECT_STATS;
4119 if (bp->flags & ONE_SHOT_MSI_FLAG)
4120 val |= BNX2_HC_CONFIG_ONE_SHOT;
4122 REG_WR(bp, BNX2_HC_CONFIG, val);
4124 /* Clear internal stats counters. */
4125 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4127 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4129 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
4130 BNX2_PORT_FEATURE_ASF_ENABLED)
4131 bp->flags |= ASF_ENABLE_FLAG;
4133 /* Initialize the receive filter. */
4134 bnx2_set_rx_mode(bp->dev);
4136 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4137 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4138 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4139 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4141 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4144 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4145 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4149 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4155 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4157 u32 val, offset0, offset1, offset2, offset3;
4159 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4160 offset0 = BNX2_L2CTX_TYPE_XI;
4161 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4162 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4163 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4165 offset0 = BNX2_L2CTX_TYPE;
4166 offset1 = BNX2_L2CTX_CMD_TYPE;
4167 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4168 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4170 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4171 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4173 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4174 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4176 val = (u64) bp->tx_desc_mapping >> 32;
4177 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4179 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4180 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4184 bnx2_init_tx_ring(struct bnx2 *bp)
4189 bp->tx_wake_thresh = bp->tx_ring_size / 2;
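/* The extra BD at the end of the page is a chain entry pointing back
 * to the base of the tx ring, making the descriptor ring circular.
 */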
4191 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4193 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4194 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4199 bp->tx_prod_bseq = 0;
4202 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4203 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4205 bnx2_init_tx_context(bp, cid);
4209 bnx2_init_rx_ring(struct bnx2 *bp)
4213 u16 prod, ring_prod;
4216 /* 8 for CRC and VLAN */
4217 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4219 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4221 ring_prod = prod = bp->rx_prod = 0;
4224 bp->rx_prod_bseq = 0;
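/* The last BD of each rx ring page is a chain entry pointing at the
 * next page's DMA address; the final page points back to page 0,
 * closing the ring.
 */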
4226 for (i = 0; i < bp->rx_max_ring; i++) {
4229 rxbd = &bp->rx_desc_ring[i][0];
4230 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4231 rxbd->rx_bd_len = bp->rx_buf_use_size;
4232 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4234 if (i == (bp->rx_max_ring - 1))
4238 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4239 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4243 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4244 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4246 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4248 val = (u64) bp->rx_desc_mapping[0] >> 32;
4249 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4251 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4252 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4254 for (i = 0; i < bp->rx_ring_size; i++) {
4255 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4258 prod = NEXT_RX_BD(prod);
4259 ring_prod = RX_RING_IDX(prod);
4263 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4265 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4269 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4273 bp->rx_ring_size = size;
4275 while (size > MAX_RX_DESC_CNT) {
4276 size -= MAX_RX_DESC_CNT;
4277 num_rings++;
4278 }
4279 /* round to next power of 2 */
4280 max = MAX_RX_RINGS;
4281 while ((max & num_rings) == 0)
4282 max >>= 1;
4284 if (num_rings != max)
4285 max <<= 1;
4287 bp->rx_max_ring = max;
4288 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
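/* e.g. a requested size of 600 consumes three pages of
 * MAX_RX_DESC_CNT usable BDs and is rounded up to a power of two,
 * giving rx_max_ring = 4.
 */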
4292 bnx2_free_tx_skbs(struct bnx2 *bp)
4296 if (bp->tx_buf_ring == NULL)
4299 for (i = 0; i < TX_DESC_CNT; ) {
4300 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4301 struct sk_buff *skb = tx_buf->skb;
4309 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4310 skb_headlen(skb), PCI_DMA_TODEVICE);
4314 last = skb_shinfo(skb)->nr_frags;
4315 for (j = 0; j < last; j++) {
4316 tx_buf = &bp->tx_buf_ring[i + j + 1];
4317 pci_unmap_page(bp->pdev,
4318 pci_unmap_addr(tx_buf, mapping),
4319 skb_shinfo(skb)->frags[j].size,
4329 bnx2_free_rx_skbs(struct bnx2 *bp)
4333 if (bp->rx_buf_ring == NULL)
4336 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4337 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4338 struct sk_buff *skb = rx_buf->skb;
4343 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4344 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4353 bnx2_free_skbs(struct bnx2 *bp)
4355 bnx2_free_tx_skbs(bp);
4356 bnx2_free_rx_skbs(bp);
4360 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4364 rc = bnx2_reset_chip(bp, reset_code);
4369 if ((rc = bnx2_init_chip(bp)) != 0)
4372 bnx2_init_tx_ring(bp);
4373 bnx2_init_rx_ring(bp);
4378 bnx2_init_nic(struct bnx2 *bp)
4382 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4385 spin_lock_bh(&bp->phy_lock);
4388 spin_unlock_bh(&bp->phy_lock);
4393 bnx2_test_registers(struct bnx2 *bp)
4397 static const struct {
4398 u16 offset;
4399 u16 flags;
4400 #define BNX2_FL_NOT_5709 1
4401 u32 rw_mask;
4402 u32 ro_mask;
4403 } reg_tbl[] = {
4404 { 0x006c, 0, 0x00000000, 0x0000003f },
4405 { 0x0090, 0, 0xffffffff, 0x00000000 },
4406 { 0x0094, 0, 0x00000000, 0x00000000 },
4408 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4409 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4410 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4411 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4412 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4413 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4414 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4415 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4416 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4418 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4419 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4420 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4421 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4422 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4423 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4425 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4426 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4427 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4429 { 0x1000, 0, 0x00000000, 0x00000001 },
4430 { 0x1004, 0, 0x00000000, 0x000f0001 },
4432 { 0x1408, 0, 0x01c00800, 0x00000000 },
4433 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4434 { 0x14a8, 0, 0x00000000, 0x000001ff },
4435 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4436 { 0x14b0, 0, 0x00000002, 0x00000001 },
4437 { 0x14b8, 0, 0x00000000, 0x00000000 },
4438 { 0x14c0, 0, 0x00000000, 0x00000009 },
4439 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4440 { 0x14cc, 0, 0x00000000, 0x00000001 },
4441 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4443 { 0x1800, 0, 0x00000000, 0x00000001 },
4444 { 0x1804, 0, 0x00000000, 0x00000003 },
4446 { 0x2800, 0, 0x00000000, 0x00000001 },
4447 { 0x2804, 0, 0x00000000, 0x00003f01 },
4448 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4449 { 0x2810, 0, 0xffff0000, 0x00000000 },
4450 { 0x2814, 0, 0xffff0000, 0x00000000 },
4451 { 0x2818, 0, 0xffff0000, 0x00000000 },
4452 { 0x281c, 0, 0xffff0000, 0x00000000 },
4453 { 0x2834, 0, 0xffffffff, 0x00000000 },
4454 { 0x2840, 0, 0x00000000, 0xffffffff },
4455 { 0x2844, 0, 0x00000000, 0xffffffff },
4456 { 0x2848, 0, 0xffffffff, 0x00000000 },
4457 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4459 { 0x2c00, 0, 0x00000000, 0x00000011 },
4460 { 0x2c04, 0, 0x00000000, 0x00030007 },
4462 { 0x3c00, 0, 0x00000000, 0x00000001 },
4463 { 0x3c04, 0, 0x00000000, 0x00070000 },
4464 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4465 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4466 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4467 { 0x3c14, 0, 0x00000000, 0xffffffff },
4468 { 0x3c18, 0, 0x00000000, 0xffffffff },
4469 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4470 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4472 { 0x5004, 0, 0x00000000, 0x0000007f },
4473 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4475 { 0x5c00, 0, 0x00000000, 0x00000001 },
4476 { 0x5c04, 0, 0x00000000, 0x0003000f },
4477 { 0x5c08, 0, 0x00000003, 0x00000000 },
4478 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4479 { 0x5c10, 0, 0x00000000, 0xffffffff },
4480 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4481 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4482 { 0x5c88, 0, 0x00000000, 0x00077373 },
4483 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4485 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4486 { 0x680c, 0, 0xffffffff, 0x00000000 },
4487 { 0x6810, 0, 0xffffffff, 0x00000000 },
4488 { 0x6814, 0, 0xffffffff, 0x00000000 },
4489 { 0x6818, 0, 0xffffffff, 0x00000000 },
4490 { 0x681c, 0, 0xffffffff, 0x00000000 },
4491 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4492 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4493 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4494 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4495 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4496 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4497 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4498 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4499 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4500 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4501 { 0x684c, 0, 0xffffffff, 0x00000000 },
4502 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4503 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4504 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4505 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4506 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4507 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4509 { 0xffff, 0, 0x00000000, 0x00000000 },
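/* For each entry: save the register, write all zeros and then all
 * ones, and check that read/write bits respond while read-only bits
 * keep their saved value; the register is restored afterwards.
 */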
4514 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4517 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4518 u32 offset, rw_mask, ro_mask, save_val, val;
4519 u16 flags = reg_tbl[i].flags;
4521 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4524 offset = (u32) reg_tbl[i].offset;
4525 rw_mask = reg_tbl[i].rw_mask;
4526 ro_mask = reg_tbl[i].ro_mask;
4528 save_val = readl(bp->regview + offset);
4530 writel(0, bp->regview + offset);
4532 val = readl(bp->regview + offset);
4533 if ((val & rw_mask) != 0) {
4537 if ((val & ro_mask) != (save_val & ro_mask)) {
4541 writel(0xffffffff, bp->regview + offset);
4543 val = readl(bp->regview + offset);
4544 if ((val & rw_mask) != rw_mask) {
4548 if ((val & ro_mask) != (save_val & ro_mask)) {
4552 writel(save_val, bp->regview + offset);
4556 writel(save_val, bp->regview + offset);
4564 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4566 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4567 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
4570 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4573 for (offset = 0; offset < size; offset += 4) {
4575 REG_WR_IND(bp, start + offset, test_pattern[i]);
4577 if (REG_RD_IND(bp, start + offset) !=
4587 bnx2_test_memory(struct bnx2 *bp)
4591 static struct mem_entry {
4592 u32 offset;
4593 u32 len;
4594 } mem_tbl_5706[] = {
4595 { 0x60000, 0x4000 },
4596 { 0xa0000, 0x3000 },
4597 { 0xe0000, 0x4000 },
4598 { 0x120000, 0x4000 },
4599 { 0x1a0000, 0x4000 },
4600 { 0x160000, 0x4000 },
4601 { 0xffffffff, 0 },
4602 },
4603 mem_tbl_5709[] = {
4604 { 0x60000, 0x4000 },
4605 { 0xa0000, 0x3000 },
4606 { 0xe0000, 0x4000 },
4607 { 0x120000, 0x4000 },
4608 { 0x1a0000, 0x4000 },
4609 { 0xffffffff, 0 },
4610 };
4611 struct mem_entry *mem_tbl;
4613 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4614 mem_tbl = mem_tbl_5709;
4616 mem_tbl = mem_tbl_5706;
4618 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4619 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4620 mem_tbl[i].len)) != 0) {
4628 #define BNX2_MAC_LOOPBACK 0
4629 #define BNX2_PHY_LOOPBACK 1
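/* Send a single frame through the MAC or PHY loopback path, then poll
 * the status block and verify the frame arrives back on the rx ring
 * with the expected length and payload.
 */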
4632 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4634 unsigned int pkt_size, num_pkts, i;
4635 struct sk_buff *skb, *rx_skb;
4636 unsigned char *packet;
4637 u16 rx_start_idx, rx_idx;
4640 struct sw_bd *rx_buf;
4641 struct l2_fhdr *rx_hdr;
4644 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4645 bp->loopback = MAC_LOOPBACK;
4646 bnx2_set_mac_loopback(bp);
4648 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4649 bp->loopback = PHY_LOOPBACK;
4650 bnx2_set_phy_loopback(bp);
4656 skb = netdev_alloc_skb(bp->dev, pkt_size);
4659 packet = skb_put(skb, pkt_size);
4660 memcpy(packet, bp->dev->dev_addr, 6);
4661 memset(packet + 6, 0x0, 8);
4662 for (i = 14; i < pkt_size; i++)
4663 packet[i] = (unsigned char) (i & 0xff);
4665 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4668 REG_WR(bp, BNX2_HC_COMMAND,
4669 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4671 REG_RD(bp, BNX2_HC_COMMAND);
4674 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4678 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4680 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4681 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4682 txbd->tx_bd_mss_nbytes = pkt_size;
4683 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4686 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4687 bp->tx_prod_bseq += pkt_size;
4689 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4690 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4694 REG_WR(bp, BNX2_HC_COMMAND,
4695 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4697 REG_RD(bp, BNX2_HC_COMMAND);
4701 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4704 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4705 goto loopback_test_done;
4708 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4709 if (rx_idx != rx_start_idx + num_pkts) {
4710 goto loopback_test_done;
4713 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4714 rx_skb = rx_buf->skb;
4716 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4717 skb_reserve(rx_skb, bp->rx_offset);
4719 pci_dma_sync_single_for_cpu(bp->pdev,
4720 pci_unmap_addr(rx_buf, mapping),
4721 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4723 if (rx_hdr->l2_fhdr_status &
4724 (L2_FHDR_ERRORS_BAD_CRC |
4725 L2_FHDR_ERRORS_PHY_DECODE |
4726 L2_FHDR_ERRORS_ALIGNMENT |
4727 L2_FHDR_ERRORS_TOO_SHORT |
4728 L2_FHDR_ERRORS_GIANT_FRAME)) {
4730 goto loopback_test_done;
4733 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4734 goto loopback_test_done;
4737 for (i = 14; i < pkt_size; i++) {
4738 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4739 goto loopback_test_done;
4750 #define BNX2_MAC_LOOPBACK_FAILED 1
4751 #define BNX2_PHY_LOOPBACK_FAILED 2
4752 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4753 BNX2_PHY_LOOPBACK_FAILED)
4756 bnx2_test_loopback(struct bnx2 *bp)
4760 if (!netif_running(bp->dev))
4761 return BNX2_LOOPBACK_FAILED;
4763 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4764 spin_lock_bh(&bp->phy_lock);
4766 spin_unlock_bh(&bp->phy_lock);
4767 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4768 rc |= BNX2_MAC_LOOPBACK_FAILED;
4769 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4770 rc |= BNX2_PHY_LOOPBACK_FAILED;
4774 #define NVRAM_SIZE 0x200
4775 #define CRC32_RESIDUAL 0xdebb20e3
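/* Running CRC32 over a block that ends with its own little-endian CRC
 * leaves this well-known residual value when the data is intact.
 */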
4778 bnx2_test_nvram(struct bnx2 *bp)
4780 u32 buf[NVRAM_SIZE / 4];
4781 u8 *data = (u8 *) buf;
4785 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4786 goto test_nvram_done;
4788 magic = be32_to_cpu(buf[0]);
4789 if (magic != 0x669955aa) {
4791 goto test_nvram_done;
4794 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4795 goto test_nvram_done;
4797 csum = ether_crc_le(0x100, data);
4798 if (csum != CRC32_RESIDUAL) {
4800 goto test_nvram_done;
4803 csum = ether_crc_le(0x100, data + 0x100);
4804 if (csum != CRC32_RESIDUAL) {
4813 bnx2_test_link(struct bnx2 *bp)
4817 spin_lock_bh(&bp->phy_lock);
4818 bnx2_enable_bmsr1(bp);
4819 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4820 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4821 bnx2_disable_bmsr1(bp);
4822 spin_unlock_bh(&bp->phy_lock);
4824 if (bmsr & BMSR_LSTATUS) {
4831 bnx2_test_intr(struct bnx2 *bp)
4836 if (!netif_running(bp->dev))
4839 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4841 /* This register is not touched during run-time. */
4842 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4843 REG_RD(bp, BNX2_HC_COMMAND);
4845 for (i = 0; i < 10; i++) {
4846 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4852 msleep_interruptible(10);
4861 bnx2_5706_serdes_timer(struct bnx2 *bp)
4863 spin_lock(&bp->phy_lock);
4864 if (bp->serdes_an_pending)
4865 bp->serdes_an_pending--;
4866 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4869 bp->current_interval = bp->timer_interval;
4871 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4873 if (bmcr & BMCR_ANENABLE) {
4876 bnx2_write_phy(bp, 0x1c, 0x7c00);
4877 bnx2_read_phy(bp, 0x1c, &phy1);
4879 bnx2_write_phy(bp, 0x17, 0x0f01);
4880 bnx2_read_phy(bp, 0x15, &phy2);
4881 bnx2_write_phy(bp, 0x17, 0x0f01);
4882 bnx2_read_phy(bp, 0x15, &phy2);
4884 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4885 !(phy2 & 0x20)) { /* no CONFIG */
4887 bmcr &= ~BMCR_ANENABLE;
4888 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4889 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4890 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4894 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4895 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4898 bnx2_write_phy(bp, 0x17, 0x0f01);
4899 bnx2_read_phy(bp, 0x15, &phy2);
4903 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4904 bmcr |= BMCR_ANENABLE;
4905 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4907 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4910 bp->current_interval = bp->timer_interval;
4912 spin_unlock(&bp->phy_lock);
4916 bnx2_5708_serdes_timer(struct bnx2 *bp)
4918 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4921 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4922 bp->serdes_an_pending = 0;
4926 spin_lock(&bp->phy_lock);
4927 if (bp->serdes_an_pending)
4928 bp->serdes_an_pending--;
4929 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4932 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4933 if (bmcr & BMCR_ANENABLE) {
4934 bnx2_enable_forced_2g5(bp);
4935 bp->current_interval = SERDES_FORCED_TIMEOUT;
4937 bnx2_disable_forced_2g5(bp);
4938 bp->serdes_an_pending = 2;
4939 bp->current_interval = bp->timer_interval;
4943 bp->current_interval = bp->timer_interval;
4945 spin_unlock(&bp->phy_lock);
4949 bnx2_timer(unsigned long data)
4951 struct bnx2 *bp = (struct bnx2 *) data;
4953 if (!netif_running(bp->dev))
4956 if (atomic_read(&bp->intr_sem) != 0)
4957 goto bnx2_restart_timer;
4959 bnx2_send_heart_beat(bp);
4961 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4963 /* workaround occasional corrupted counters */
4964 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4965 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4966 BNX2_HC_COMMAND_STATS_NOW);
4968 if (bp->phy_flags & PHY_SERDES_FLAG) {
4969 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4970 bnx2_5706_serdes_timer(bp);
4972 bnx2_5708_serdes_timer(bp);
4976 mod_timer(&bp->timer, jiffies + bp->current_interval);
4980 bnx2_request_irq(struct bnx2 *bp)
4982 struct net_device *dev = bp->dev;
4985 if (bp->flags & USING_MSI_FLAG) {
4986 irq_handler_t fn = bnx2_msi;
4988 if (bp->flags & ONE_SHOT_MSI_FLAG)
4989 fn = bnx2_msi_1shot;
4991 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4993 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4994 IRQF_SHARED, dev->name, dev);
4999 bnx2_free_irq(struct bnx2 *bp)
5001 struct net_device *dev = bp->dev;
5003 if (bp->flags & USING_MSI_FLAG) {
5004 free_irq(bp->pdev->irq, dev);
5005 pci_disable_msi(bp->pdev);
5006 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5008 free_irq(bp->pdev->irq, dev);
5011 /* Called with rtnl_lock */
5013 bnx2_open(struct net_device *dev)
5015 struct bnx2 *bp = netdev_priv(dev);
5018 netif_carrier_off(dev);
5020 bnx2_set_power_state(bp, PCI_D0);
5021 bnx2_disable_int(bp);
5023 rc = bnx2_alloc_mem(bp);
5027 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5028 if (pci_enable_msi(bp->pdev) == 0) {
5029 bp->flags |= USING_MSI_FLAG;
5030 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5031 bp->flags |= ONE_SHOT_MSI_FLAG;
5034 rc = bnx2_request_irq(bp);
5041 rc = bnx2_init_nic(bp);
5050 mod_timer(&bp->timer, jiffies + bp->current_interval);
5052 atomic_set(&bp->intr_sem, 0);
5054 bnx2_enable_int(bp);
5056 if (bp->flags & USING_MSI_FLAG) {
5057 /* Test MSI to make sure it is working
5058 * If MSI test fails, go back to INTx mode
5060 if (bnx2_test_intr(bp) != 0) {
5061 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5062 " using MSI, switching to INTx mode. Please"
5063 " report this failure to the PCI maintainer"
5064 " and include system chipset information.\n",
5065 bp->dev->name);
5067 bnx2_disable_int(bp);
5068 bnx2_free_irq(bp);
5070 rc = bnx2_init_nic(bp);
5072 if (!rc)
5073 rc = bnx2_request_irq(bp);
5075 if (rc) {
5076 bnx2_free_skbs(bp);
5077 bnx2_free_mem(bp);
5078 del_timer_sync(&bp->timer);
5079 return rc;
5080 }
5081 bnx2_enable_int(bp);
5084 if (bp->flags & USING_MSI_FLAG) {
5085 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5088 netif_start_queue(dev);
5094 bnx2_reset_task(struct work_struct *work)
5096 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5098 if (!netif_running(bp->dev))
5099 return;
5101 bp->in_reset_task = 1;
5102 bnx2_netif_stop(bp);
5104 bnx2_init_nic(bp);
5106 atomic_set(&bp->intr_sem, 1);
5107 bnx2_netif_start(bp);
5108 bp->in_reset_task = 0;
5112 bnx2_tx_timeout(struct net_device *dev)
5114 struct bnx2 *bp = netdev_priv(dev);
5116 /* This allows the netif to be shutdown gracefully before resetting */
5117 schedule_work(&bp->reset_task);
5121 /* Called with rtnl_lock */
5123 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5125 struct bnx2 *bp = netdev_priv(dev);
5127 bnx2_netif_stop(bp);
5129 bp->vlgrp = vlgrp;
5130 bnx2_set_rx_mode(dev);
5132 bnx2_netif_start(bp);
5136 /* Called with netif_tx_lock.
5137 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5138 * netif_wake_queue().
5141 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5143 struct bnx2 *bp = netdev_priv(dev);
5144 dma_addr_t mapping;
5145 struct tx_bd *txbd;
5146 struct sw_bd *tx_buf;
5147 u32 len, vlan_tag_flags, last_frag, mss;
5148 u16 prod, ring_prod;
5149 int i;
5151 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5152 netif_stop_queue(dev);
5153 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5154 dev->name);
5156 return NETDEV_TX_BUSY;
5158 len = skb_headlen(skb);
5159 prod = bp->tx_prod;
5160 ring_prod = TX_RING_IDX(prod);
5162 vlan_tag_flags = 0;
5163 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5164 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5167 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5168 vlan_tag_flags |=
5169 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5171 if ((mss = skb_shinfo(skb)->gso_size)) {
5172 u32 tcp_opt_len, ip_tcp_len;
5173 struct iphdr *iph;
5175 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5177 tcp_opt_len = tcp_optlen(skb);
5179 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5180 u32 tcp_off = skb_transport_offset(skb) -
5181 sizeof(struct ipv6hdr) - ETH_HLEN;
5183 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5184 TX_BD_FLAGS_SW_FLAGS;
5185 if (likely(tcp_off == 0))
5186 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5188 else {
5189 vlan_tag_flags |= ((tcp_off & 0x3) <<
5190 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5191 ((tcp_off & 0x10) <<
5192 TX_BD_FLAGS_TCP6_OFF4_SHL);
5193 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
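/* Worked example for the TCPv6 offset encoding above, assuming a TCP header
 * that starts 0x16 bytes past a basic IPv6 header: bits 0-1 (0x16 & 0x3) and
 * bit 4 (0x16 & 0x10) of tcp_off land in the vlan_tag_flags word, while bits
 * 2-3 (0x16 & 0xc) are folded into the mss word.  A tcp_off of 0 (no IPv6
 * extension headers) keeps the whole offset field clear.
 */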
5194 }
5195 } else {
5196 if (skb_header_cloned(skb) &&
5197 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5198 dev_kfree_skb(skb);
5199 return NETDEV_TX_OK;
5200 }
5202 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5204 iph = ip_hdr(skb);
5205 iph->check = 0;
5206 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5207 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5208 iph->daddr, 0,
5209 IPPROTO_TCP, 0);
5211 if (tcp_opt_len || (iph->ihl > 5)) {
5212 vlan_tag_flags |= ((iph->ihl - 5) +
5213 (tcp_opt_len >> 2)) << 8;
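/* For LSO the IPv4 header above is primed per segment: tot_len is set to the
 * size of one segment (mss plus headers) and tcp->check is seeded with the
 * complemented pseudo-header sum over the addresses and protocol with a zero
 * length, letting the chip finish the checksum for every segment it cuts.
 */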
5219 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5221 tx_buf = &bp->tx_buf_ring[ring_prod];
5222 tx_buf->skb = skb;
5223 pci_unmap_addr_set(tx_buf, mapping, mapping);
5225 txbd = &bp->tx_desc_ring[ring_prod];
5227 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5228 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5229 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5230 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5232 last_frag = skb_shinfo(skb)->nr_frags;
5234 for (i = 0; i < last_frag; i++) {
5235 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5237 prod = NEXT_TX_BD(prod);
5238 ring_prod = TX_RING_IDX(prod);
5239 txbd = &bp->tx_desc_ring[ring_prod];
5241 len = frag->size;
5242 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5243 len, PCI_DMA_TODEVICE);
5244 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5245 mapping, mapping);
5247 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5248 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5249 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5250 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5253 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5255 prod = NEXT_TX_BD(prod);
5256 bp->tx_prod_bseq += skb->len;
5258 REG_WR16(bp, bp->tx_bidx_addr, prod);
5259 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
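/* Doorbell: tx_bidx takes the new 16-bit producer index and tx_bseq a
 * running count of bytes ever queued (bp->tx_prod_bseq above grows by
 * skb->len per packet); the chip uses both to pick up the new BDs.
 */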
5261 mmiowb();
5263 bp->tx_prod = prod;
5264 dev->trans_start = jiffies;
5266 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5267 netif_stop_queue(dev);
5268 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5269 netif_wake_queue(dev);
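/* The second bnx2_tx_avail() test closes a race with bnx2_tx_int(): the
 * completion path may free descriptors between the first check and
 * netif_stop_queue(), so the queue is woken again if room reappeared.
 */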
5272 return NETDEV_TX_OK;
5275 /* Called with rtnl_lock */
5277 bnx2_close(struct net_device *dev)
5279 struct bnx2 *bp = netdev_priv(dev);
5280 u32 reset_code;
5282 /* Calling flush_scheduled_work() may deadlock because
5283 * linkwatch_event() may be on the workqueue and it will try to get
5284 * the rtnl_lock which we are holding.
5286 while (bp->in_reset_task)
5287 msleep(1);
5289 bnx2_netif_stop(bp);
5290 del_timer_sync(&bp->timer);
5291 if (bp->flags & NO_WOL_FLAG)
5292 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5293 else if (bp->wol)
5294 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5295 else
5296 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5297 bnx2_reset_chip(bp, reset_code);
5298 bnx2_free_irq(bp);
5299 bnx2_free_skbs(bp);
5300 bnx2_free_mem(bp);
5301 bp->link_up = 0;
5302 netif_carrier_off(bp->dev);
5303 bnx2_set_power_state(bp, PCI_D3hot);
5304 return 0;
5307 #define GET_NET_STATS64(ctr) \
5308 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5309 (unsigned long) (ctr##_lo)
5311 #define GET_NET_STATS32(ctr) \
5312 (ctr##_lo)
5314 #if (BITS_PER_LONG == 64)
5315 #define GET_NET_STATS GET_NET_STATS64
5316 #else
5317 #define GET_NET_STATS GET_NET_STATS32
5318 #endif
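/* Illustrative sketch (not part of the driver): how a 64-bit hardware
 * counter split into _hi/_lo words folds into an unsigned long under the
 * GET_NET_STATS macros above.  The struct and helper names here are
 * hypothetical.
 */
struct example_hw_ctr {
	u32 example_hi;		/* upper 32 bits of the counter */
	u32 example_lo;		/* lower 32 bits of the counter */
};

static inline unsigned long example_get_ctr(struct example_hw_ctr *c)
{
#if (BITS_PER_LONG == 64)
	/* a 64-bit long holds the full value: (hi << 32) + lo */
	return ((unsigned long) c->example_hi << 32) +
	       (unsigned long) c->example_lo;
#else
	/* a 32-bit long keeps only the low word, as GET_NET_STATS32 does */
	return (unsigned long) c->example_lo;
#endif
}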
5320 static struct net_device_stats *
5321 bnx2_get_stats(struct net_device *dev)
5323 struct bnx2 *bp = netdev_priv(dev);
5324 struct statistics_block *stats_blk = bp->stats_blk;
5325 struct net_device_stats *net_stats = &bp->net_stats;
5327 if (bp->stats_blk == NULL) {
5328 return net_stats;
5329 }
5330 net_stats->rx_packets =
5331 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5332 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5333 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5335 net_stats->tx_packets =
5336 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5337 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5338 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5340 net_stats->rx_bytes =
5341 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5343 net_stats->tx_bytes =
5344 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5346 net_stats->multicast =
5347 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5349 net_stats->collisions =
5350 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5352 net_stats->rx_length_errors =
5353 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5354 stats_blk->stat_EtherStatsOverrsizePkts);
5356 net_stats->rx_over_errors =
5357 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5359 net_stats->rx_frame_errors =
5360 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5362 net_stats->rx_crc_errors =
5363 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5365 net_stats->rx_errors = net_stats->rx_length_errors +
5366 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5367 net_stats->rx_crc_errors;
5369 net_stats->tx_aborted_errors =
5370 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5371 stats_blk->stat_Dot3StatsLateCollisions);
5373 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5374 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5375 net_stats->tx_carrier_errors = 0;
5377 net_stats->tx_carrier_errors =
5378 (unsigned long)
5379 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5382 net_stats->tx_errors =
5383 (unsigned long)
5384 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5385 +
5386 net_stats->tx_aborted_errors +
5387 net_stats->tx_carrier_errors;
5389 net_stats->rx_missed_errors =
5390 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5391 stats_blk->stat_FwRxDrop);
5393 return net_stats;
5396 /* All ethtool functions called with rtnl_lock */
5399 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5401 struct bnx2 *bp = netdev_priv(dev);
5402 int support_serdes = 0, support_copper = 0;
5404 cmd->supported = SUPPORTED_Autoneg;
5405 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5406 support_serdes = 1;
5407 support_copper = 1;
5408 } else if (bp->phy_port == PORT_FIBRE)
5409 support_serdes = 1;
5410 else
5411 support_copper = 1;
5413 if (support_serdes) {
5414 cmd->supported |= SUPPORTED_1000baseT_Full |
5415 SUPPORTED_FIBRE;
5416 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5417 cmd->supported |= SUPPORTED_2500baseX_Full;
5420 if (support_copper) {
5421 cmd->supported |= SUPPORTED_10baseT_Half |
5422 SUPPORTED_10baseT_Full |
5423 SUPPORTED_100baseT_Half |
5424 SUPPORTED_100baseT_Full |
5425 SUPPORTED_1000baseT_Full |
5426 SUPPORTED_TP;
5428 }
5430 spin_lock_bh(&bp->phy_lock);
5431 cmd->port = bp->phy_port;
5432 cmd->advertising = bp->advertising;
5434 if (bp->autoneg & AUTONEG_SPEED) {
5435 cmd->autoneg = AUTONEG_ENABLE;
5436 }
5437 else {
5438 cmd->autoneg = AUTONEG_DISABLE;
5439 }
5441 if (netif_carrier_ok(dev)) {
5442 cmd->speed = bp->line_speed;
5443 cmd->duplex = bp->duplex;
5444 }
5445 else {
5446 cmd->speed = -1;
5447 cmd->duplex = -1;
5448 }
5449 spin_unlock_bh(&bp->phy_lock);
5451 cmd->transceiver = XCVR_INTERNAL;
5452 cmd->phy_address = bp->phy_addr;
5458 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5460 struct bnx2 *bp = netdev_priv(dev);
5461 u8 autoneg = bp->autoneg;
5462 u8 req_duplex = bp->req_duplex;
5463 u16 req_line_speed = bp->req_line_speed;
5464 u32 advertising = bp->advertising;
5465 int err = -EINVAL;
5467 spin_lock_bh(&bp->phy_lock);
5469 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5470 goto err_out_unlock;
5472 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5473 goto err_out_unlock;
5475 if (cmd->autoneg == AUTONEG_ENABLE) {
5476 autoneg |= AUTONEG_SPEED;
5478 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5480 /* allow advertising 1 speed */
5481 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5482 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5483 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5484 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5486 if (cmd->port == PORT_FIBRE)
5487 goto err_out_unlock;
5489 advertising = cmd->advertising;
5491 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5492 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5493 (cmd->port == PORT_TP))
5494 goto err_out_unlock;
5495 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5496 advertising = cmd->advertising;
5497 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5498 goto err_out_unlock;
5499 else {
5500 if (cmd->port == PORT_FIBRE)
5501 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5502 else
5503 advertising = ETHTOOL_ALL_COPPER_SPEED;
5504 }
5505 advertising |= ADVERTISED_Autoneg;
5506 }
5507 else {
5508 if (cmd->port == PORT_FIBRE) {
5509 if ((cmd->speed != SPEED_1000 &&
5510 cmd->speed != SPEED_2500) ||
5511 (cmd->duplex != DUPLEX_FULL))
5512 goto err_out_unlock;
5514 if (cmd->speed == SPEED_2500 &&
5515 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5516 goto err_out_unlock;
5518 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5519 goto err_out_unlock;
5521 autoneg &= ~AUTONEG_SPEED;
5522 req_line_speed = cmd->speed;
5523 req_duplex = cmd->duplex;
5527 bp->autoneg = autoneg;
5528 bp->advertising = advertising;
5529 bp->req_line_speed = req_line_speed;
5530 bp->req_duplex = req_duplex;
5532 err = bnx2_setup_phy(bp, cmd->port);
5534 err_out_unlock:
5535 spin_unlock_bh(&bp->phy_lock);
5537 return err;
5541 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5543 struct bnx2 *bp = netdev_priv(dev);
5545 strcpy(info->driver, DRV_MODULE_NAME);
5546 strcpy(info->version, DRV_MODULE_VERSION);
5547 strcpy(info->bus_info, pci_name(bp->pdev));
5548 strcpy(info->fw_version, bp->fw_version);
5551 #define BNX2_REGDUMP_LEN (32 * 1024)
5554 bnx2_get_regs_len(struct net_device *dev)
5556 return BNX2_REGDUMP_LEN;
5560 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5562 u32 *p = _p, i, offset;
5563 u8 *orig_p = _p;
5564 struct bnx2 *bp = netdev_priv(dev);
5565 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5566 0x0800, 0x0880, 0x0c00, 0x0c10,
5567 0x0c30, 0x0d08, 0x1000, 0x101c,
5568 0x1040, 0x1048, 0x1080, 0x10a4,
5569 0x1400, 0x1490, 0x1498, 0x14f0,
5570 0x1500, 0x155c, 0x1580, 0x15dc,
5571 0x1600, 0x1658, 0x1680, 0x16d8,
5572 0x1800, 0x1820, 0x1840, 0x1854,
5573 0x1880, 0x1894, 0x1900, 0x1984,
5574 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5575 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5576 0x2000, 0x2030, 0x23c0, 0x2400,
5577 0x2800, 0x2820, 0x2830, 0x2850,
5578 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5579 0x3c00, 0x3c94, 0x4000, 0x4010,
5580 0x4080, 0x4090, 0x43c0, 0x4458,
5581 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5582 0x4fc0, 0x5010, 0x53c0, 0x5444,
5583 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5584 0x5fc0, 0x6000, 0x6400, 0x6428,
5585 0x6800, 0x6848, 0x684c, 0x6860,
5586 0x6888, 0x6910, 0x8000 };
5590 memset(p, 0, BNX2_REGDUMP_LEN);
5592 if (!netif_running(bp->dev))
5593 return;
5595 i = 0;
5596 offset = reg_boundaries[0];
5598 while (offset < BNX2_REGDUMP_LEN) {
5599 *p++ = REG_RD(bp, offset);
5600 offset += 4;
5601 if (offset == reg_boundaries[i + 1]) {
5602 offset = reg_boundaries[i + 2];
5603 p = (u32 *) (orig_p + offset);
5604 i += 2;
5605 }
5610 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5612 struct bnx2 *bp = netdev_priv(dev);
5614 if (bp->flags & NO_WOL_FLAG) {
5615 wol->supported = 0;
5616 wol->wolopts = 0;
5617 }
5618 else {
5619 wol->supported = WAKE_MAGIC;
5620 if (bp->wol)
5621 wol->wolopts = WAKE_MAGIC;
5622 else
5623 wol->wolopts = 0;
5624 }
5625 memset(&wol->sopass, 0, sizeof(wol->sopass));
5629 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5631 struct bnx2 *bp = netdev_priv(dev);
5633 if (wol->wolopts & ~WAKE_MAGIC)
5634 return -EINVAL;
5636 if (wol->wolopts & WAKE_MAGIC) {
5637 if (bp->flags & NO_WOL_FLAG)
5638 return -EINVAL;
5640 bp->wol = 1;
5641 }
5642 else {
5643 bp->wol = 0;
5644 }
5645 return 0;
5649 bnx2_nway_reset(struct net_device *dev)
5651 struct bnx2 *bp = netdev_priv(dev);
5652 u32 bmcr;
5654 if (!(bp->autoneg & AUTONEG_SPEED)) {
5655 return -EINVAL;
5656 }
5658 spin_lock_bh(&bp->phy_lock);
5660 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5661 int rc;
5663 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5664 spin_unlock_bh(&bp->phy_lock);
5665 return rc;
5666 }
5668 /* Force a link down visible on the other side */
5669 if (bp->phy_flags & PHY_SERDES_FLAG) {
5670 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5671 spin_unlock_bh(&bp->phy_lock);
5673 msleep(20);
5675 spin_lock_bh(&bp->phy_lock);
5677 bp->current_interval = SERDES_AN_TIMEOUT;
5678 bp->serdes_an_pending = 1;
5679 mod_timer(&bp->timer, jiffies + bp->current_interval);
5680 }
5682 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5683 bmcr &= ~BMCR_LOOPBACK;
5684 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5686 spin_unlock_bh(&bp->phy_lock);
5688 return 0;
5692 bnx2_get_eeprom_len(struct net_device *dev)
5694 struct bnx2 *bp = netdev_priv(dev);
5696 if (bp->flash_info == NULL)
5697 return 0;
5699 return (int) bp->flash_size;
5703 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5706 struct bnx2 *bp = netdev_priv(dev);
5707 int rc;
5709 /* parameters already validated in ethtool_get_eeprom */
5711 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5713 return rc;
5717 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5720 struct bnx2 *bp = netdev_priv(dev);
5721 int rc;
5723 /* parameters already validated in ethtool_set_eeprom */
5725 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5727 return rc;
5731 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5733 struct bnx2 *bp = netdev_priv(dev);
5735 memset(coal, 0, sizeof(struct ethtool_coalesce));
5737 coal->rx_coalesce_usecs = bp->rx_ticks;
5738 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5739 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5740 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5742 coal->tx_coalesce_usecs = bp->tx_ticks;
5743 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5744 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5745 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5747 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5749 return 0;
5753 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5755 struct bnx2 *bp = netdev_priv(dev);
5757 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5758 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5760 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5761 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5763 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5764 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5766 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5767 if (bp->rx_quick_cons_trip_int > 0xff)
5768 bp->rx_quick_cons_trip_int = 0xff;
5770 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5771 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5773 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5774 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5776 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5777 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5779 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5780 if (bp->tx_quick_cons_trip_int > 0xff)
5781 bp->tx_quick_cons_trip_int = 0xff;
5783 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5784 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5785 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5786 bp->stats_ticks = USEC_PER_SEC;
5787 }
5788 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5789 bp->stats_ticks &= 0xffff00;
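/* The clamps above track hardware field widths: tick values are 10-bit
 * (max 0x3ff), frame-count trip points 8-bit (max 0xff), and the statistics
 * interval uses bits 8-23 only, hence the 0xffff00 mask.  On the 5708 the
 * interval is further restricted to 0 or one second, and bnx2_timer() fires
 * a manual BNX2_HC_COMMAND_STATS_NOW as the corrupted-counter workaround.
 */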
5791 if (netif_running(bp->dev)) {
5792 bnx2_netif_stop(bp);
5793 bnx2_init_nic(bp);
5794 bnx2_netif_start(bp);
5795 }
5797 return 0;
5801 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5803 struct bnx2 *bp = netdev_priv(dev);
5805 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5806 ering->rx_mini_max_pending = 0;
5807 ering->rx_jumbo_max_pending = 0;
5809 ering->rx_pending = bp->rx_ring_size;
5810 ering->rx_mini_pending = 0;
5811 ering->rx_jumbo_pending = 0;
5813 ering->tx_max_pending = MAX_TX_DESC_CNT;
5814 ering->tx_pending = bp->tx_ring_size;
5818 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5820 struct bnx2 *bp = netdev_priv(dev);
5822 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5823 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5824 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5826 return -EINVAL;
5827 }
5828 if (netif_running(bp->dev)) {
5829 bnx2_netif_stop(bp);
5830 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5831 bnx2_free_skbs(bp);
5832 bnx2_free_mem(bp);
5833 }
5835 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5836 bp->tx_ring_size = ering->tx_pending;
5838 if (netif_running(bp->dev)) {
5839 int rc;
5841 rc = bnx2_alloc_mem(bp);
5842 if (rc)
5843 return rc;
5844 bnx2_init_nic(bp);
5845 bnx2_netif_start(bp);
5846 }
5848 return 0;
5852 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5854 struct bnx2 *bp = netdev_priv(dev);
5856 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5857 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5858 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5862 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5864 struct bnx2 *bp = netdev_priv(dev);
5866 bp->req_flow_ctrl = 0;
5867 if (epause->rx_pause)
5868 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5869 if (epause->tx_pause)
5870 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5872 if (epause->autoneg) {
5873 bp->autoneg |= AUTONEG_FLOW_CTRL;
5875 else {
5876 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5877 }
5879 spin_lock_bh(&bp->phy_lock);
5881 bnx2_setup_phy(bp, bp->phy_port);
5883 spin_unlock_bh(&bp->phy_lock);
5885 return 0;
5889 bnx2_get_rx_csum(struct net_device *dev)
5891 struct bnx2 *bp = netdev_priv(dev);
5893 return bp->rx_csum;
5897 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5899 struct bnx2 *bp = netdev_priv(dev);
5901 bp->rx_csum = data;
5902 return 0;
5906 bnx2_set_tso(struct net_device *dev, u32 data)
5908 struct bnx2 *bp = netdev_priv(dev);
5910 if (data) {
5911 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5912 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5913 dev->features |= NETIF_F_TSO6;
5914 } else
5915 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5916 NETIF_F_TSO_ECN);
5917 return 0;
5920 #define BNX2_NUM_STATS 46
5923 char string[ETH_GSTRING_LEN];
5924 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5925 { "rx_bytes" },
5926 { "rx_error_bytes" },
5927 { "tx_bytes" },
5928 { "tx_error_bytes" },
5929 { "rx_ucast_packets" },
5930 { "rx_mcast_packets" },
5931 { "rx_bcast_packets" },
5932 { "tx_ucast_packets" },
5933 { "tx_mcast_packets" },
5934 { "tx_bcast_packets" },
5935 { "tx_mac_errors" },
5936 { "tx_carrier_errors" },
5937 { "rx_crc_errors" },
5938 { "rx_align_errors" },
5939 { "tx_single_collisions" },
5940 { "tx_multi_collisions" },
5941 { "tx_deferred" },
5942 { "tx_excess_collisions" },
5943 { "tx_late_collisions" },
5944 { "tx_total_collisions" },
5945 { "rx_fragments" },
5946 { "rx_jabbers" },
5947 { "rx_undersize_packets" },
5948 { "rx_oversize_packets" },
5949 { "rx_64_byte_packets" },
5950 { "rx_65_to_127_byte_packets" },
5951 { "rx_128_to_255_byte_packets" },
5952 { "rx_256_to_511_byte_packets" },
5953 { "rx_512_to_1023_byte_packets" },
5954 { "rx_1024_to_1522_byte_packets" },
5955 { "rx_1523_to_9022_byte_packets" },
5956 { "tx_64_byte_packets" },
5957 { "tx_65_to_127_byte_packets" },
5958 { "tx_128_to_255_byte_packets" },
5959 { "tx_256_to_511_byte_packets" },
5960 { "tx_512_to_1023_byte_packets" },
5961 { "tx_1024_to_1522_byte_packets" },
5962 { "tx_1523_to_9022_byte_packets" },
5963 { "rx_xon_frames" },
5964 { "rx_xoff_frames" },
5965 { "tx_xon_frames" },
5966 { "tx_xoff_frames" },
5967 { "rx_mac_ctrl_frames" },
5968 { "rx_filtered_packets" },
5969 { "rx_discards" },
5970 { "rx_fw_discards" },
5973 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
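/* STATS_OFFSET32 turns a byte offset inside struct statistics_block into an
 * index of 32-bit words, so the offsets below can be applied directly to a
 * u32 view of the block.  A minimal sketch (hypothetical helper name):
 */
static inline u32 example_read_stat32(struct statistics_block *blk,
				      unsigned long word_idx)
{
	u32 *hw_stats = (u32 *) blk;

	/* e.g. word_idx = STATS_OFFSET32(stat_FwRxDrop) */
	return *(hw_stats + word_idx);
}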
5975 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5976 STATS_OFFSET32(stat_IfHCInOctets_hi),
5977 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5978 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5979 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5980 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5981 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5982 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5983 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5984 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5985 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5986 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5987 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5988 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5989 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5990 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5991 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5992 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5993 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5994 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5995 STATS_OFFSET32(stat_EtherStatsCollisions),
5996 STATS_OFFSET32(stat_EtherStatsFragments),
5997 STATS_OFFSET32(stat_EtherStatsJabbers),
5998 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5999 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6000 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6001 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6002 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6003 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6004 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6005 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6006 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6007 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6008 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6009 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6010 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6011 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6012 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6013 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6014 STATS_OFFSET32(stat_XonPauseFramesReceived),
6015 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6016 STATS_OFFSET32(stat_OutXonSent),
6017 STATS_OFFSET32(stat_OutXoffSent),
6018 STATS_OFFSET32(stat_MacControlFramesReceived),
6019 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6020 STATS_OFFSET32(stat_IfInMBUFDiscards),
6021 STATS_OFFSET32(stat_FwRxDrop),
6024 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6025 * skipped because of errata.
6027 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6028 8,0,8,8,8,8,8,8,8,8,
6029 4,0,4,4,4,4,4,4,4,4,
6030 4,4,4,4,4,4,4,4,4,4,
6031 4,4,4,4,4,4,4,4,4,4,
6032 4,4,4,4,4,4,
6033 };
6035 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6036 8,0,8,8,8,8,8,8,8,8,
6037 4,4,4,4,4,4,4,4,4,4,
6038 4,4,4,4,4,4,4,4,4,4,
6039 4,4,4,4,4,4,4,4,4,4,
6040 4,4,4,4,4,4,
6041 };
6043 #define BNX2_NUM_TESTS 6
6046 char string[ETH_GSTRING_LEN];
6047 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6048 { "register_test (offline)" },
6049 { "memory_test (offline)" },
6050 { "loopback_test (offline)" },
6051 { "nvram_test (online)" },
6052 { "interrupt_test (online)" },
6053 { "link_test (online)" },
6057 bnx2_self_test_count(struct net_device *dev)
6059 return BNX2_NUM_TESTS;
6063 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6065 struct bnx2 *bp = netdev_priv(dev);
6067 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6068 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6069 int i;
6071 bnx2_netif_stop(bp);
6072 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6073 bnx2_free_skbs(bp);
6075 if (bnx2_test_registers(bp) != 0) {
6076 buf[0] = 1;
6077 etest->flags |= ETH_TEST_FL_FAILED;
6078 }
6079 if (bnx2_test_memory(bp) != 0) {
6080 buf[1] = 1;
6081 etest->flags |= ETH_TEST_FL_FAILED;
6082 }
6083 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6084 etest->flags |= ETH_TEST_FL_FAILED;
6086 if (!netif_running(bp->dev)) {
6087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6088 }
6089 else {
6090 bnx2_init_nic(bp);
6091 bnx2_netif_start(bp);
6092 }
6094 /* wait for link up */
6095 for (i = 0; i < 7; i++) {
6096 if (bp->link_up)
6097 break;
6098 msleep_interruptible(1000);
6099 }
6100 }
6102 if (bnx2_test_nvram(bp) != 0) {
6103 buf[3] = 1;
6104 etest->flags |= ETH_TEST_FL_FAILED;
6105 }
6106 if (bnx2_test_intr(bp) != 0) {
6107 buf[4] = 1;
6108 etest->flags |= ETH_TEST_FL_FAILED;
6109 }
6111 if (bnx2_test_link(bp) != 0) {
6112 buf[5] = 1;
6113 etest->flags |= ETH_TEST_FL_FAILED;
6114 }
6119 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6121 switch (stringset) {
6122 case ETH_SS_STATS:
6123 memcpy(buf, bnx2_stats_str_arr,
6124 sizeof(bnx2_stats_str_arr));
6125 break;
6126 case ETH_SS_TEST:
6127 memcpy(buf, bnx2_tests_str_arr,
6128 sizeof(bnx2_tests_str_arr));
6129 break;
6130 }
6134 bnx2_get_stats_count(struct net_device *dev)
6136 return BNX2_NUM_STATS;
6140 bnx2_get_ethtool_stats(struct net_device *dev,
6141 struct ethtool_stats *stats, u64 *buf)
6143 struct bnx2 *bp = netdev_priv(dev);
6144 int i;
6145 u32 *hw_stats = (u32 *) bp->stats_blk;
6146 u8 *stats_len_arr = NULL;
6148 if (hw_stats == NULL) {
6149 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6150 return;
6151 }
6153 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6154 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6155 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6156 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6157 stats_len_arr = bnx2_5706_stats_len_arr;
6158 else
6159 stats_len_arr = bnx2_5708_stats_len_arr;
6161 for (i = 0; i < BNX2_NUM_STATS; i++) {
6162 if (stats_len_arr[i] == 0) {
6163 /* skip this counter */
6164 buf[i] = 0;
6165 continue;
6166 }
6167 if (stats_len_arr[i] == 4) {
6168 /* 4-byte counter */
6169 buf[i] = (u64)
6170 *(hw_stats + bnx2_stats_offset_arr[i]);
6171 continue;
6172 }
6173 /* 8-byte counter */
6174 buf[i] = (((u64) *(hw_stats +
6175 bnx2_stats_offset_arr[i])) << 32) +
6176 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6181 bnx2_phys_id(struct net_device *dev, u32 data)
6183 struct bnx2 *bp = netdev_priv(dev);
6184 int i;
6185 u32 save;
6187 if (data == 0)
6188 data = 2;
6190 save = REG_RD(bp, BNX2_MISC_CFG);
6191 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6193 for (i = 0; i < (data * 2); i++) {
6194 if ((i % 2) == 0) {
6195 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6196 }
6197 else {
6198 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6199 BNX2_EMAC_LED_1000MB_OVERRIDE |
6200 BNX2_EMAC_LED_100MB_OVERRIDE |
6201 BNX2_EMAC_LED_10MB_OVERRIDE |
6202 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6203 BNX2_EMAC_LED_TRAFFIC);
6205 msleep_interruptible(500);
6206 if (signal_pending(current))
6207 break;
6208 }
6209 REG_WR(bp, BNX2_EMAC_LED, 0);
6210 REG_WR(bp, BNX2_MISC_CFG, save);
6215 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6217 struct bnx2 *bp = netdev_priv(dev);
6219 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6220 return (ethtool_op_set_tx_hw_csum(dev, data));
6222 return (ethtool_op_set_tx_csum(dev, data));
6225 static const struct ethtool_ops bnx2_ethtool_ops = {
6226 .get_settings = bnx2_get_settings,
6227 .set_settings = bnx2_set_settings,
6228 .get_drvinfo = bnx2_get_drvinfo,
6229 .get_regs_len = bnx2_get_regs_len,
6230 .get_regs = bnx2_get_regs,
6231 .get_wol = bnx2_get_wol,
6232 .set_wol = bnx2_set_wol,
6233 .nway_reset = bnx2_nway_reset,
6234 .get_link = ethtool_op_get_link,
6235 .get_eeprom_len = bnx2_get_eeprom_len,
6236 .get_eeprom = bnx2_get_eeprom,
6237 .set_eeprom = bnx2_set_eeprom,
6238 .get_coalesce = bnx2_get_coalesce,
6239 .set_coalesce = bnx2_set_coalesce,
6240 .get_ringparam = bnx2_get_ringparam,
6241 .set_ringparam = bnx2_set_ringparam,
6242 .get_pauseparam = bnx2_get_pauseparam,
6243 .set_pauseparam = bnx2_set_pauseparam,
6244 .get_rx_csum = bnx2_get_rx_csum,
6245 .set_rx_csum = bnx2_set_rx_csum,
6246 .get_tx_csum = ethtool_op_get_tx_csum,
6247 .set_tx_csum = bnx2_set_tx_csum,
6248 .get_sg = ethtool_op_get_sg,
6249 .set_sg = ethtool_op_set_sg,
6250 .get_tso = ethtool_op_get_tso,
6251 .set_tso = bnx2_set_tso,
6252 .self_test_count = bnx2_self_test_count,
6253 .self_test = bnx2_self_test,
6254 .get_strings = bnx2_get_strings,
6255 .phys_id = bnx2_phys_id,
6256 .get_stats_count = bnx2_get_stats_count,
6257 .get_ethtool_stats = bnx2_get_ethtool_stats,
6258 .get_perm_addr = ethtool_op_get_perm_addr,
6261 /* Called with rtnl_lock */
6263 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6265 struct mii_ioctl_data *data = if_mii(ifr);
6266 struct bnx2 *bp = netdev_priv(dev);
6267 int err;
6269 switch (cmd) {
6270 case SIOCGMIIPHY:
6271 data->phy_id = bp->phy_addr;
6273 /* fallthru */
6274 case SIOCGMIIREG: {
6275 u32 mii_regval;
6277 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6278 return -EOPNOTSUPP;
6280 if (!netif_running(dev))
6281 return -EAGAIN;
6283 spin_lock_bh(&bp->phy_lock);
6284 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6285 spin_unlock_bh(&bp->phy_lock);
6287 data->val_out = mii_regval;
6289 return err;
6290 }
6292 case SIOCSMIIREG:
6293 if (!capable(CAP_NET_ADMIN))
6294 return -EPERM;
6296 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6297 return -EOPNOTSUPP;
6299 if (!netif_running(dev))
6300 return -EAGAIN;
6302 spin_lock_bh(&bp->phy_lock);
6303 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6304 spin_unlock_bh(&bp->phy_lock);
6306 return err;
6308 default:
6309 /* do nothing */
6310 break;
6311 }
6313 return -EOPNOTSUPP;
6315 /* Called with rtnl_lock */
6317 bnx2_change_mac_addr(struct net_device *dev, void *p)
6319 struct sockaddr *addr = p;
6320 struct bnx2 *bp = netdev_priv(dev);
6322 if (!is_valid_ether_addr(addr->sa_data))
6323 return -EINVAL;
6325 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6326 if (netif_running(dev))
6327 bnx2_set_mac_addr(bp);
6329 return 0;
6332 /* Called with rtnl_lock */
6334 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6336 struct bnx2 *bp = netdev_priv(dev);
6338 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6339 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6340 return -EINVAL;
6342 dev->mtu = new_mtu;
6343 if (netif_running(dev)) {
6344 bnx2_netif_stop(bp);
6346 bnx2_init_nic(bp);
6348 bnx2_netif_start(bp);
6349 }
6351 return 0;
6353 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6355 poll_bnx2(struct net_device *dev)
6357 struct bnx2 *bp = netdev_priv(dev);
6359 disable_irq(bp->pdev->irq);
6360 bnx2_interrupt(bp->pdev->irq, dev);
6361 enable_irq(bp->pdev->irq);
6365 static void __devinit
6366 bnx2_get_5709_media(struct bnx2 *bp)
6368 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6369 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6370 u32 strap;
6372 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6373 return;
6374 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6375 bp->phy_flags |= PHY_SERDES_FLAG;
6376 return;
6377 }
6379 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6380 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6382 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6384 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6385 switch (strap) {
6386 case 0x4:
6387 case 0x5:
6388 case 0x6:
6389 bp->phy_flags |= PHY_SERDES_FLAG;
6390 return;
6391 }
6392 } else {
6393 switch (strap) {
6394 case 0x1:
6395 case 0x2:
6396 case 0x4:
6397 bp->phy_flags |= PHY_SERDES_FLAG;
6398 return;
6399 }
6400 }
6401 }
6403 static void __devinit
6404 bnx2_get_pci_speed(struct bnx2 *bp)
6406 u32 reg;
6408 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6409 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6410 u32 clkreg;
6412 bp->flags |= PCIX_FLAG;
6414 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6416 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6417 switch (clkreg) {
6418 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6419 bp->bus_speed_mhz = 133;
6420 break;
6422 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6423 bp->bus_speed_mhz = 100;
6424 break;
6426 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6427 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6428 bp->bus_speed_mhz = 66;
6429 break;
6431 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6432 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6433 bp->bus_speed_mhz = 50;
6434 break;
6436 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6437 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6438 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6439 bp->bus_speed_mhz = 33;
6440 break;
6441 }
6442 }
6443 else {
6444 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6445 bp->bus_speed_mhz = 66;
6446 else
6447 bp->bus_speed_mhz = 33;
6448 }
6450 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6451 bp->flags |= PCI_32BIT_FLAG;
6455 static int __devinit
6456 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6458 struct bnx2 *bp;
6459 unsigned long mem_len;
6460 int rc, i, j;
6461 u32 reg;
6462 u64 dma_mask, persist_dma_mask;
6464 SET_MODULE_OWNER(dev);
6465 SET_NETDEV_DEV(dev, &pdev->dev);
6466 bp = netdev_priv(dev);
6467 bp->flags = 0;
6468 bp->phy_flags = 0;
6471 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6472 rc = pci_enable_device(pdev);
6473 if (rc) {
6474 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6475 goto err_out;
6476 }
6478 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6479 dev_err(&pdev->dev,
6480 "Cannot find PCI device base address, aborting.\n");
6481 rc = -ENODEV;
6482 goto err_out_disable;
6483 }
6485 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6486 if (rc) {
6487 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6488 goto err_out_disable;
6489 }
6491 pci_set_master(pdev);
6493 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6494 if (bp->pm_cap == 0) {
6495 dev_err(&pdev->dev,
6496 "Cannot find power management capability, aborting.\n");
6497 rc = -EIO;
6498 goto err_out_release;
6499 }
6501 bp->dev = dev;
6502 bp->pdev = pdev;
6504 spin_lock_init(&bp->phy_lock);
6505 spin_lock_init(&bp->indirect_lock);
6506 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6508 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6509 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6510 dev->mem_end = dev->mem_start + mem_len;
6511 dev->irq = pdev->irq;
6513 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6515 if (!bp->regview) {
6516 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6517 rc = -ENOMEM;
6518 goto err_out_release;
6519 }
6521 /* Configure byte swap and enable write to the reg_window registers.
6522 * Rely on CPU to do target byte swapping on big endian systems
6523 * The chip's target access swapping will not swap all accesses
6525 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6526 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6527 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6529 bnx2_set_power_state(bp, PCI_D0);
6531 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6533 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6534 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6535 dev_err(&pdev->dev,
6536 "Cannot find PCIE capability, aborting.\n");
6537 rc = -EIO;
6538 goto err_out_unmap;
6539 }
6540 bp->flags |= PCIE_FLAG;
6541 } else {
6542 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6543 if (bp->pcix_cap == 0) {
6544 dev_err(&pdev->dev,
6545 "Cannot find PCIX capability, aborting.\n");
6546 rc = -EIO;
6547 goto err_out_unmap;
6548 }
6549 }
6551 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6552 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6553 bp->flags |= MSI_CAP_FLAG;
6556 /* 5708 cannot support DMA addresses > 40-bit. */
6557 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6558 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6560 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6562 /* Configure DMA attributes. */
6563 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6564 dev->features |= NETIF_F_HIGHDMA;
6565 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6566 if (rc) {
6567 dev_err(&pdev->dev,
6568 "pci_set_consistent_dma_mask failed, aborting.\n");
6569 goto err_out_unmap;
6570 }
6571 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6572 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6573 goto err_out_unmap;
6574 }
6576 if (!(bp->flags & PCIE_FLAG))
6577 bnx2_get_pci_speed(bp);
6579 /* 5706A0 may falsely detect SERR and PERR. */
6580 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6581 reg = REG_RD(bp, PCI_COMMAND);
6582 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6583 REG_WR(bp, PCI_COMMAND, reg);
6585 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6586 !(bp->flags & PCIX_FLAG)) {
6588 dev_err(&pdev->dev,
6589 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6590 goto err_out_unmap;
6591 }
6593 bnx2_init_nvram(bp);
6595 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6597 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6598 BNX2_SHM_HDR_SIGNATURE_SIG) {
6599 u32 off = PCI_FUNC(pdev->devfn) << 2;
6601 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6602 } else
6603 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6605 /* Get the permanent MAC address. First we need to make sure the
6606 * firmware is actually running.
6608 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6610 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6611 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6612 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6613 rc = -ENODEV;
6614 goto err_out_unmap;
6615 }
6617 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6618 for (i = 0, j = 0; i < 3; i++) {
6619 u8 num, k, skip0;
6621 num = (u8) (reg >> (24 - (i * 8)));
6622 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6623 if (num >= k || !skip0 || k == 1) {
6624 bp->fw_version[j++] = (num / k) + '0';
6625 skip0 = 0;
6626 }
6627 }
6628 if (i != 2)
6629 bp->fw_version[j++] = '.';
6630 }
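/* Example of the conversion above: each of the three high bytes of the
 * BNX2_DEV_INFO_BC_REV word becomes one decimal field with leading zeros
 * suppressed (k walks 100, 10, 1), so reg = 0x01020b00 renders as "1.2.11".
 */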
6631 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6632 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6633 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6634 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6636 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6638 bp->fw_version[j++] = ' ';
6639 for (i = 0; i < 3; i++) {
6640 reg = REG_RD_IND(bp, addr + i * 4);
6641 reg = swab32(reg);
6642 memcpy(&bp->fw_version[j], &reg, 4);
6643 j += 4;
6644 }
6645 }
6647 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6648 bp->mac_addr[0] = (u8) (reg >> 8);
6649 bp->mac_addr[1] = (u8) reg;
6651 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6652 bp->mac_addr[2] = (u8) (reg >> 24);
6653 bp->mac_addr[3] = (u8) (reg >> 16);
6654 bp->mac_addr[4] = (u8) (reg >> 8);
6655 bp->mac_addr[5] = (u8) reg;
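/* Shared-memory MAC layout: MAC_UPPER carries bytes 0-1 of the station
 * address in its low 16 bits and MAC_LOWER carries bytes 2-5, so e.g.
 * MAC_UPPER = 0x0010 with MAC_LOWER = 0x18xxxxxx yields an address
 * beginning 00:10:18 (a Broadcom OUI).
 */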
6657 bp->tx_ring_size = MAX_TX_DESC_CNT;
6658 bnx2_set_rx_ring_size(bp, 255);
6660 bp->rx_csum = 1;
6662 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6664 bp->tx_quick_cons_trip_int = 20;
6665 bp->tx_quick_cons_trip = 20;
6666 bp->tx_ticks_int = 80;
6667 bp->tx_ticks = 80;
6669 bp->rx_quick_cons_trip_int = 6;
6670 bp->rx_quick_cons_trip = 6;
6671 bp->rx_ticks_int = 18;
6672 bp->rx_ticks = 18;
6674 bp->stats_ticks = 1000000 & 0xffff00;
6676 bp->timer_interval = HZ;
6677 bp->current_interval = HZ;
6679 bp->phy_addr = 1;
6681 /* Disable WOL support if we are running on a SERDES chip. */
6682 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6683 bnx2_get_5709_media(bp);
6684 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6685 bp->phy_flags |= PHY_SERDES_FLAG;
6687 bp->phy_port = PORT_TP;
6688 if (bp->phy_flags & PHY_SERDES_FLAG) {
6689 bp->phy_port = PORT_FIBRE;
6690 bp->flags |= NO_WOL_FLAG;
6691 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6692 bp->phy_addr = 2;
6693 reg = REG_RD_IND(bp, bp->shmem_base +
6694 BNX2_SHARED_HW_CFG_CONFIG);
6695 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6696 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6697 }
6698 bnx2_init_remote_phy(bp);
6700 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6701 CHIP_NUM(bp) == CHIP_NUM_5708)
6702 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6703 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6704 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6706 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6707 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6708 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6709 bp->flags |= NO_WOL_FLAG;
6711 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6712 bp->tx_quick_cons_trip_int =
6713 bp->tx_quick_cons_trip;
6714 bp->tx_ticks_int = bp->tx_ticks;
6715 bp->rx_quick_cons_trip_int =
6716 bp->rx_quick_cons_trip;
6717 bp->rx_ticks_int = bp->rx_ticks;
6718 bp->comp_prod_trip_int = bp->comp_prod_trip;
6719 bp->com_ticks_int = bp->com_ticks;
6720 bp->cmd_ticks_int = bp->cmd_ticks;
6723 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6725 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6726 * with byte enables disabled on the unused 32-bit word. This is legal
6727 * but causes problems on the AMD 8132 which will eventually stop
6728 * responding after a while.
6730 * AMD believes this incompatibility is unique to the 5706, and
6731 * prefers to locally disable MSI rather than globally disabling it.
6733 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6734 struct pci_dev *amd_8132 = NULL;
6736 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6737 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6738 amd_8132))) {
6739 u8 rev;
6741 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6742 if (rev >= 0x10 && rev <= 0x13) {
6743 disable_msi = 1;
6744 pci_dev_put(amd_8132);
6745 break;
6746 }
6747 }
6750 bnx2_set_default_link(bp);
6751 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6753 init_timer(&bp->timer);
6754 bp->timer.expires = RUN_AT(bp->timer_interval);
6755 bp->timer.data = (unsigned long) bp;
6756 bp->timer.function = bnx2_timer;
6758 return 0;
6760 err_out_unmap:
6761 if (bp->regview) {
6762 iounmap(bp->regview);
6763 bp->regview = NULL;
6764 }
6766 err_out_release:
6767 pci_release_regions(pdev);
6769 err_out_disable:
6770 pci_disable_device(pdev);
6771 pci_set_drvdata(pdev, NULL);
6773 err_out:
6774 return rc;
6777 static char * __devinit
6778 bnx2_bus_string(struct bnx2 *bp, char *str)
6779 {
6780 char *s = str;
6782 if (bp->flags & PCIE_FLAG) {
6783 s += sprintf(s, "PCI Express");
6784 } else {
6785 s += sprintf(s, "PCI");
6786 if (bp->flags & PCIX_FLAG)
6787 s += sprintf(s, "-X");
6788 if (bp->flags & PCI_32BIT_FLAG)
6789 s += sprintf(s, " 32-bit");
6790 else
6791 s += sprintf(s, " 64-bit");
6792 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6793 }
6795 return str;
6796 }
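/* Example results: "PCI Express" on a PCIe chip, or strings such as
 * "PCI-X 64-bit 133MHz" / "PCI 32-bit 66MHz" built from PCIX_FLAG,
 * PCI_32BIT_FLAG and the clock detected in bnx2_get_pci_speed().
 */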
6797 static int __devinit
6798 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6800 static int version_printed = 0;
6801 struct net_device *dev = NULL;
6802 struct bnx2 *bp;
6803 int rc, i;
6804 char str[40];
6806 if (version_printed++ == 0)
6807 printk(KERN_INFO "%s", version);
6809 /* dev zeroed in init_etherdev */
6810 dev = alloc_etherdev(sizeof(*bp));
6812 if (!dev)
6813 return -ENOMEM;
6815 rc = bnx2_init_board(pdev, dev);
6816 if (rc < 0) {
6817 free_netdev(dev);
6818 return rc;
6819 }
6821 dev->open = bnx2_open;
6822 dev->hard_start_xmit = bnx2_start_xmit;
6823 dev->stop = bnx2_close;
6824 dev->get_stats = bnx2_get_stats;
6825 dev->set_multicast_list = bnx2_set_rx_mode;
6826 dev->do_ioctl = bnx2_ioctl;
6827 dev->set_mac_address = bnx2_change_mac_addr;
6828 dev->change_mtu = bnx2_change_mtu;
6829 dev->tx_timeout = bnx2_tx_timeout;
6830 dev->watchdog_timeo = TX_TIMEOUT;
6831 #ifdef BCM_VLAN
6832 dev->vlan_rx_register = bnx2_vlan_rx_register;
6833 #endif
6834 dev->poll = bnx2_poll;
6835 dev->ethtool_ops = &bnx2_ethtool_ops;
6836 dev->weight = 64;
6838 bp = netdev_priv(dev);
6840 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6841 dev->poll_controller = poll_bnx2;
6844 pci_set_drvdata(pdev, dev);
6846 memcpy(dev->dev_addr, bp->mac_addr, 6);
6847 memcpy(dev->perm_addr, bp->mac_addr, 6);
6848 bp->name = board_info[ent->driver_data].name;
6850 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6851 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6852 dev->features |= NETIF_F_IPV6_CSUM;
6854 #ifdef BCM_VLAN
6855 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6856 #endif
6857 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6858 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6859 dev->features |= NETIF_F_TSO6;
6861 if ((rc = register_netdev(dev))) {
6862 dev_err(&pdev->dev, "Cannot register net device\n");
6863 if (bp->regview)
6864 iounmap(bp->regview);
6865 pci_release_regions(pdev);
6866 pci_disable_device(pdev);
6867 pci_set_drvdata(pdev, NULL);
6868 free_netdev(dev);
6869 return rc;
6870 }
6872 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6873 "IRQ %d, ",
6874 dev->name,
6875 bp->name,
6876 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6877 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6878 bnx2_bus_string(bp, str),
6879 dev->base_addr,
6880 dev->irq);
6882 printk("node addr ");
6883 for (i = 0; i < 6; i++)
6884 printk("%2.2x", dev->dev_addr[i]);
6885 printk("\n");
6887 return 0;
6890 static void __devexit
6891 bnx2_remove_one(struct pci_dev *pdev)
6893 struct net_device *dev = pci_get_drvdata(pdev);
6894 struct bnx2 *bp = netdev_priv(dev);
6896 flush_scheduled_work();
6898 unregister_netdev(dev);
6900 if (bp->regview)
6901 iounmap(bp->regview);
6903 free_netdev(dev);
6904 pci_release_regions(pdev);
6905 pci_disable_device(pdev);
6906 pci_set_drvdata(pdev, NULL);
6910 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6912 struct net_device *dev = pci_get_drvdata(pdev);
6913 struct bnx2 *bp = netdev_priv(dev);
6916 if (!netif_running(dev))
6917 return 0;
6919 flush_scheduled_work();
6920 bnx2_netif_stop(bp);
6921 netif_device_detach(dev);
6922 del_timer_sync(&bp->timer);
6923 if (bp->flags & NO_WOL_FLAG)
6924 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6925 else if (bp->wol)
6926 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6927 else
6928 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6929 bnx2_reset_chip(bp, reset_code);
6931 pci_save_state(pdev);
6932 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6934 return 0;
6937 bnx2_resume(struct pci_dev *pdev)
6939 struct net_device *dev = pci_get_drvdata(pdev);
6940 struct bnx2 *bp = netdev_priv(dev);
6942 if (!netif_running(dev))
6943 return 0;
6945 pci_restore_state(pdev);
6946 bnx2_set_power_state(bp, PCI_D0);
6947 netif_device_attach(dev);
6948 bnx2_init_nic(bp);
6949 bnx2_netif_start(bp);
6950 return 0;
6953 static struct pci_driver bnx2_pci_driver = {
6954 .name = DRV_MODULE_NAME,
6955 .id_table = bnx2_pci_tbl,
6956 .probe = bnx2_init_one,
6957 .remove = __devexit_p(bnx2_remove_one),
6958 .suspend = bnx2_suspend,
6959 .resume = bnx2_resume,
6962 static int __init bnx2_init(void)
6964 return pci_register_driver(&bnx2_pci_driver);
6967 static void __exit bnx2_cleanup(void)
6969 pci_unregister_driver(&bnx2_pci_driver);
6972 module_init(bnx2_init);
6973 module_exit(bnx2_cleanup);