/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.10"
#define DRV_MODULE_RELDATE	"May 1, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static const char version[] __devinitdata =
    "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static const struct {
    char *name;
} board_info[] __devinitdata = {
    { "Broadcom NetXtreme II BCM5706 1000Base-T" },
    { "HP NC370T Multifunction Gigabit Server Adapter" },
    { "HP NC370i Multifunction Gigabit Server Adapter" },
    { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
    { "HP NC370F Multifunction Gigabit Server Adapter" },
    { "Broadcom NetXtreme II BCM5708 1000Base-T" },
    { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
    { "Broadcom NetXtreme II BCM5709 1000Base-T" },
    { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
    };
static struct pci_device_id bnx2_pci_tbl[] = {
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
      PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
      PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
      PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
    { 0, }
};
static struct flash_spec flash_table[] =
{
    /* Slow EEPROM */
    {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
     1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
     SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
     "EEPROM - slow"},
    /* Expansion entry 0001 */
    {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
     0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 0001"},
    /* Saifun SA25F010 (non-buffered flash) */
    /* strap, cfg1, & write1 need updates */
    {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
     0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
     "Non-buffered flash (128kB)"},
    /* Saifun SA25F020 (non-buffered flash) */
    /* strap, cfg1, & write1 need updates */
    {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
     0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
     "Non-buffered flash (256kB)"},
    /* Expansion entry 0100 */
    {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
     0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 0100"},
    /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
    {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
     0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
     ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
     "Entry 0101: ST M45PE10 (128kB non-buffered)"},
    /* Entry 0110: ST M45PE20 (non-buffered flash) */
    {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
     0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
     ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
     "Entry 0110: ST M45PE20 (256kB non-buffered)"},
    /* Saifun SA25F005 (non-buffered flash) */
    /* strap, cfg1, & write1 need updates */
    {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
     0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
     "Non-buffered flash (64kB)"},
    /* Fast EEPROM */
    {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
     1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
     SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
     "EEPROM - fast"},
    /* Expansion entry 1001 */
    {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
     0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1001"},
    /* Expansion entry 1010 */
    {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
     0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1010"},
    /* ATMEL AT45DB011B (buffered flash) */
    {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
     1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
     BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
     "Buffered flash (128kB)"},
    /* Expansion entry 1100 */
    {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
     0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1100"},
    /* Expansion entry 1101 */
    {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
     0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1101"},
    /* Atmel Expansion entry 1110 */
    {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
     1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
     BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1110 (Atmel)"},
    /* ATMEL AT45DB021B (buffered flash) */
    {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
     1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
     BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
     "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
    u32 diff;

    /* The ring uses 256 indices for 255 entries, one of them
     * needs to be skipped.
     */
    diff = bp->tx_prod - bp->tx_cons;
    if (unlikely(diff >= TX_DESC_CNT)) {
        diff &= 0xffff;
        if (diff == TX_DESC_CNT)
            diff = MAX_TX_DESC_CNT;
    }
    return (bp->tx_ring_size - diff);
}
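/* Worked example (illustrative values): the producer/consumer indices are
 * effectively 16-bit, hence the masking above.  With tx_prod = 0x0005 and
 * tx_cons = 0xfffe, the u32 subtraction yields 0xffff0007; the
 * diff >= TX_DESC_CNT path masks it back down to 7 in-flight descriptors,
 * so tx_ring_size - 7 slots remain postable.
 */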
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
    u32 val;

    spin_lock_bh(&bp->indirect_lock);
    REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
    val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
    spin_unlock_bh(&bp->indirect_lock);
    return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
    spin_lock_bh(&bp->indirect_lock);
    REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
    REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
    spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
    offset += cid_addr;
    spin_lock_bh(&bp->indirect_lock);
    if (CHIP_NUM(bp) == CHIP_NUM_5709) {
        int i;

        REG_WR(bp, BNX2_CTX_CTX_DATA, val);
        REG_WR(bp, BNX2_CTX_CTX_CTRL,
               offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
        /* Poll until the hardware clears the write request bit. */
        for (i = 0; i < 5; i++) {
            val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
            if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                break;
            udelay(5);
        }
    } else {
        REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
        REG_WR(bp, BNX2_CTX_DATA, val);
    }
    spin_unlock_bh(&bp->indirect_lock);
}
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
    u32 val1;
    int i, ret;

    if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        REG_RD(bp, BNX2_EMAC_MDIO_MODE);
    }

    val1 = (bp->phy_addr << 21) | (reg << 16) |
           BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
           BNX2_EMAC_MDIO_COMM_START_BUSY;
    REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

    for (i = 0; i < 50; i++) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
        if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
            val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
            val1 &= BNX2_EMAC_MDIO_COMM_DATA;
            break;
        }
        udelay(10);
    }

    if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
        *val = 0x0;
        ret = -EBUSY;
    } else {
        *val = val1;
        ret = 0;
    }

    if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        REG_RD(bp, BNX2_EMAC_MDIO_MODE);
    }

    return ret;
}
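/* Note on the MDIO_COMM encoding used above and in bnx2_write_phy() below:
 * the PHY address goes in bits [25:21] (phy_addr << 21), the register
 * number in bits [20:16] (reg << 16), and the 16-bit data in [15:0] --
 * the standard clause-22 MII management frame layout.  START_BUSY kicks
 * off the transaction and is polled until the hardware clears it.
 */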
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
    u32 val1;
    int i, ret;

    if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        REG_RD(bp, BNX2_EMAC_MDIO_MODE);
    }

    val1 = (bp->phy_addr << 21) | (reg << 16) | val |
           BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
           BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
    REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

    for (i = 0; i < 50; i++) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
        if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
            break;
        }
        udelay(10);
    }

    if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
        ret = -EBUSY;
    else
        ret = 0;

    if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        REG_RD(bp, BNX2_EMAC_MDIO_MODE);
    }

    return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
    REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
           BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
    REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
    REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
           BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
           BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

    REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
           BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

    REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
    atomic_inc(&bp->intr_sem);
    bnx2_disable_int(bp);
    synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
    bnx2_disable_int_sync(bp);
    if (netif_running(bp->dev)) {
        netif_poll_disable(bp->dev);
        netif_tx_disable(bp->dev);
        bp->dev->trans_start = jiffies;	/* prevent tx timeout */
    }
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
    if (atomic_dec_and_test(&bp->intr_sem)) {
        if (netif_running(bp->dev)) {
            netif_wake_queue(bp->dev);
            netif_poll_enable(bp->dev);
            bnx2_enable_int(bp);
        }
    }
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->ctx_pages; i++) {
        if (bp->ctx_blk[i]) {
            pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                bp->ctx_blk[i],
                                bp->ctx_blk_mapping[i]);
            bp->ctx_blk[i] = NULL;
        }
    }
    if (bp->status_blk) {
        pci_free_consistent(bp->pdev, bp->status_stats_size,
                            bp->status_blk, bp->status_blk_mapping);
        bp->status_blk = NULL;
        bp->stats_blk = NULL;
    }
    if (bp->tx_desc_ring) {
        pci_free_consistent(bp->pdev,
                            sizeof(struct tx_bd) * TX_DESC_CNT,
                            bp->tx_desc_ring, bp->tx_desc_mapping);
        bp->tx_desc_ring = NULL;
    }
    kfree(bp->tx_buf_ring);
    bp->tx_buf_ring = NULL;
    for (i = 0; i < bp->rx_max_ring; i++) {
        if (bp->rx_desc_ring[i])
            pci_free_consistent(bp->pdev,
                                sizeof(struct rx_bd) * RX_DESC_CNT,
                                bp->rx_desc_ring[i],
                                bp->rx_desc_mapping[i]);
        bp->rx_desc_ring[i] = NULL;
    }
    vfree(bp->rx_buf_ring);
    bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
    int i, status_blk_size;

    bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                              GFP_KERNEL);
    if (bp->tx_buf_ring == NULL)
        return -ENOMEM;

    bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                            sizeof(struct tx_bd) *
                                            TX_DESC_CNT,
                                            &bp->tx_desc_mapping);
    if (bp->tx_desc_ring == NULL)
        goto alloc_mem_err;

    bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                              bp->rx_max_ring);
    if (bp->rx_buf_ring == NULL)
        goto alloc_mem_err;

    memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
           bp->rx_max_ring);

    for (i = 0; i < bp->rx_max_ring; i++) {
        bp->rx_desc_ring[i] =
            pci_alloc_consistent(bp->pdev,
                                 sizeof(struct rx_bd) * RX_DESC_CNT,
                                 &bp->rx_desc_mapping[i]);
        if (bp->rx_desc_ring[i] == NULL)
            goto alloc_mem_err;
    }

    /* Combine status and statistics blocks into one allocation. */
    status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
    bp->status_stats_size = status_blk_size +
                            sizeof(struct statistics_block);

    bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
    if (bp->status_blk == NULL)
        goto alloc_mem_err;

    memset(bp->status_blk, 0, bp->status_stats_size);

    bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                              status_blk_size);

    bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
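    /* Resulting layout of the combined allocation set up above:
     *
     *   status_blk_mapping -> +------------------------------+
     *                         | struct status_block          |
     *                         | (padded to L1_CACHE_ALIGN)   |
     *   stats_blk_mapping --> +------------------------------+
     *                         | struct statistics_block      |
     *                         +------------------------------+
     *
     * A single pci_alloc_consistent() call therefore backs both blocks.
     */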
    if (CHIP_NUM(bp) == CHIP_NUM_5709) {
        bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
        if (bp->ctx_pages == 0)
            bp->ctx_pages = 1;
        for (i = 0; i < bp->ctx_pages; i++) {
            bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                  BCM_PAGE_SIZE,
                                                  &bp->ctx_blk_mapping[i]);
            if (bp->ctx_blk[i] == NULL)
                goto alloc_mem_err;
        }
    }
    return 0;

alloc_mem_err:
    bnx2_free_mem(bp);
    return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
    u32 fw_link_status = 0;

    if (bp->link_up) {
        u32 bmsr;

        switch (bp->line_speed) {
        case SPEED_10:
            if (bp->duplex == DUPLEX_HALF)
                fw_link_status = BNX2_LINK_STATUS_10HALF;
            else
                fw_link_status = BNX2_LINK_STATUS_10FULL;
            break;
        case SPEED_100:
            if (bp->duplex == DUPLEX_HALF)
                fw_link_status = BNX2_LINK_STATUS_100HALF;
            else
                fw_link_status = BNX2_LINK_STATUS_100FULL;
            break;
        case SPEED_1000:
            if (bp->duplex == DUPLEX_HALF)
                fw_link_status = BNX2_LINK_STATUS_1000HALF;
            else
                fw_link_status = BNX2_LINK_STATUS_1000FULL;
            break;
        case SPEED_2500:
            if (bp->duplex == DUPLEX_HALF)
                fw_link_status = BNX2_LINK_STATUS_2500HALF;
            else
                fw_link_status = BNX2_LINK_STATUS_2500FULL;
            break;
        }

        fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

        if (bp->autoneg) {
            fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

            if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
            else
                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
        }
    } else
        fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

    REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static void
bnx2_report_link(struct bnx2 *bp)
{
    if (bp->link_up) {
        netif_carrier_on(bp->dev);
        printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

        printk("%d Mbps ", bp->line_speed);

        if (bp->duplex == DUPLEX_FULL)
            printk("full duplex");
        else
            printk("half duplex");

        if (bp->flow_ctrl) {
            if (bp->flow_ctrl & FLOW_CTRL_RX) {
                printk(", receive ");
                if (bp->flow_ctrl & FLOW_CTRL_TX)
                    printk("& transmit ");
            } else {
                printk(", transmit ");
            }
            printk("flow control ON");
        }
        printk("\n");
    } else {
        netif_carrier_off(bp->dev);
        printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
    }

    bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
    u32 local_adv, remote_adv;

    bp->flow_ctrl = 0;
    if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
        (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

        if (bp->duplex == DUPLEX_FULL) {
            bp->flow_ctrl = bp->req_flow_ctrl;
        }
        return;
    }

    if (bp->duplex != DUPLEX_FULL) {
        return;
    }

    if ((bp->phy_flags & PHY_SERDES_FLAG) &&
        (CHIP_NUM(bp) == CHIP_NUM_5708)) {
        u32 val;

        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
        if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
            bp->flow_ctrl |= FLOW_CTRL_TX;
        if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
            bp->flow_ctrl |= FLOW_CTRL_RX;
        return;
    }

    bnx2_read_phy(bp, bp->mii_adv, &local_adv);
    bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

    if (bp->phy_flags & PHY_SERDES_FLAG) {
        u32 new_local_adv = 0;
        u32 new_remote_adv = 0;

        if (local_adv & ADVERTISE_1000XPAUSE)
            new_local_adv |= ADVERTISE_PAUSE_CAP;
        if (local_adv & ADVERTISE_1000XPSE_ASYM)
            new_local_adv |= ADVERTISE_PAUSE_ASYM;
        if (remote_adv & ADVERTISE_1000XPAUSE)
            new_remote_adv |= ADVERTISE_PAUSE_CAP;
        if (remote_adv & ADVERTISE_1000XPSE_ASYM)
            new_remote_adv |= ADVERTISE_PAUSE_ASYM;

        local_adv = new_local_adv;
        remote_adv = new_remote_adv;
    }

    /* See Table 28B-3 of 802.3ab-1999 spec. */
    if (local_adv & ADVERTISE_PAUSE_CAP) {
        if (local_adv & ADVERTISE_PAUSE_ASYM) {
            if (remote_adv & ADVERTISE_PAUSE_CAP) {
                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
            }
            else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                bp->flow_ctrl = FLOW_CTRL_RX;
            }
        } else {
            if (remote_adv & ADVERTISE_PAUSE_CAP) {
                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
            }
        }
    }
    else if (local_adv & ADVERTISE_PAUSE_ASYM) {
        if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
            (remote_adv & ADVERTISE_PAUSE_ASYM)) {
            bp->flow_ctrl = FLOW_CTRL_TX;
        }
    }
}
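/* Summary of the resolution above, matching Table 28B-3:
 *
 *   local CAP (+/- ASYM), remote CAP        -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *   local CAP + ASYM,     remote ASYM only  -> FLOW_CTRL_RX
 *   local ASYM only,      remote CAP + ASYM -> FLOW_CTRL_TX
 *   any other combination                   -> no pause frames
 */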
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
    u32 val, speed;

    bp->link_up = 1;

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
    bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    if ((bp->autoneg & AUTONEG_SPEED) == 0) {
        bp->line_speed = bp->req_line_speed;
        bp->duplex = bp->req_duplex;
        return 0;
    }

    speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
    switch (speed) {
    case MII_BNX2_GP_TOP_AN_SPEED_10:
        bp->line_speed = SPEED_10;
        break;
    case MII_BNX2_GP_TOP_AN_SPEED_100:
        bp->line_speed = SPEED_100;
        break;
    case MII_BNX2_GP_TOP_AN_SPEED_1G:
    case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
        bp->line_speed = SPEED_1000;
        break;
    case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
        bp->line_speed = SPEED_2500;
        break;
    }
    if (val & MII_BNX2_GP_TOP_AN_FD)
        bp->duplex = DUPLEX_FULL;
    else
        bp->duplex = DUPLEX_HALF;
    return 0;
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
    u32 val;

    bp->link_up = 1;
    bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
    switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
    case BCM5708S_1000X_STAT1_SPEED_10:
        bp->line_speed = SPEED_10;
        break;
    case BCM5708S_1000X_STAT1_SPEED_100:
        bp->line_speed = SPEED_100;
        break;
    case BCM5708S_1000X_STAT1_SPEED_1G:
        bp->line_speed = SPEED_1000;
        break;
    case BCM5708S_1000X_STAT1_SPEED_2G5:
        bp->line_speed = SPEED_2500;
        break;
    }
    if (val & BCM5708S_1000X_STAT1_FD)
        bp->duplex = DUPLEX_FULL;
    else
        bp->duplex = DUPLEX_HALF;

    return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
    u32 bmcr, local_adv, remote_adv, common;

    bp->link_up = 1;
    bp->line_speed = SPEED_1000;

    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
    if (bmcr & BMCR_FULLDPLX) {
        bp->duplex = DUPLEX_FULL;
    } else {
        bp->duplex = DUPLEX_HALF;
    }

    if (!(bmcr & BMCR_ANENABLE)) {
        return 0;
    }

    bnx2_read_phy(bp, bp->mii_adv, &local_adv);
    bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

    common = local_adv & remote_adv;
    if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
        if (common & ADVERTISE_1000XFULL) {
            bp->duplex = DUPLEX_FULL;
        } else {
            bp->duplex = DUPLEX_HALF;
        }
    }

    return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
    u32 bmcr;

    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
    if (bmcr & BMCR_ANENABLE) {
        u32 local_adv, remote_adv, common;

        bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
        bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

        common = local_adv & (remote_adv >> 2);
        if (common & ADVERTISE_1000FULL) {
            bp->line_speed = SPEED_1000;
            bp->duplex = DUPLEX_FULL;
        }
        else if (common & ADVERTISE_1000HALF) {
            bp->line_speed = SPEED_1000;
            bp->duplex = DUPLEX_HALF;
        }
        else {
            bnx2_read_phy(bp, bp->mii_adv, &local_adv);
            bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

            common = local_adv & remote_adv;
            if (common & ADVERTISE_100FULL) {
                bp->line_speed = SPEED_100;
                bp->duplex = DUPLEX_FULL;
            }
            else if (common & ADVERTISE_100HALF) {
                bp->line_speed = SPEED_100;
                bp->duplex = DUPLEX_HALF;
            }
            else if (common & ADVERTISE_10FULL) {
                bp->line_speed = SPEED_10;
                bp->duplex = DUPLEX_FULL;
            }
            else if (common & ADVERTISE_10HALF) {
                bp->line_speed = SPEED_10;
                bp->duplex = DUPLEX_HALF;
            }
            else {
                bp->line_speed = 0;
                bp->link_up = 0;
            }
        }
    } else {
        if (bmcr & BMCR_SPEED100) {
            bp->line_speed = SPEED_100;
        } else {
            bp->line_speed = SPEED_10;
        }
        if (bmcr & BMCR_FULLDPLX) {
            bp->duplex = DUPLEX_FULL;
        } else {
            bp->duplex = DUPLEX_HALF;
        }
    }

    bp->link_up = 1;
    return 0;
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
    u32 val;

    REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
    if (bp->link_up && (bp->line_speed == SPEED_1000) &&
        (bp->duplex == DUPLEX_HALF)) {
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
    }

    /* Configure the EMAC mode register. */
    val = REG_RD(bp, BNX2_EMAC_MODE);

    val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
             BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
             BNX2_EMAC_MODE_25G_MODE);

    if (bp->link_up) {
        switch (bp->line_speed) {
        case SPEED_10:
            if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                val |= BNX2_EMAC_MODE_PORT_MII_10M;
                break;
            }
            /* fall through */
        case SPEED_100:
            val |= BNX2_EMAC_MODE_PORT_MII;
            break;
        case SPEED_2500:
            val |= BNX2_EMAC_MODE_25G_MODE;
            /* fall through */
        case SPEED_1000:
            val |= BNX2_EMAC_MODE_PORT_GMII;
            break;
        }
    } else {
        val |= BNX2_EMAC_MODE_PORT_GMII;
    }

    /* Set the MAC to operate in the appropriate duplex mode. */
    if (bp->duplex == DUPLEX_HALF)
        val |= BNX2_EMAC_MODE_HALF_DUPLEX;
    REG_WR(bp, BNX2_EMAC_MODE, val);

    /* Enable/disable rx PAUSE. */
    bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

    if (bp->flow_ctrl & FLOW_CTRL_RX)
        bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
    REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

    /* Enable/disable tx PAUSE. */
    val = REG_RD(bp, BNX2_EMAC_TX_MODE);
    val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

    if (bp->flow_ctrl & FLOW_CTRL_TX)
        val |= BNX2_EMAC_TX_MODE_FLOW_EN;
    REG_WR(bp, BNX2_EMAC_TX_MODE, val);

    /* Acknowledge the interrupt. */
    REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
    if ((bp->phy_flags & PHY_SERDES_FLAG) &&
        (CHIP_NUM(bp) == CHIP_NUM_5709))
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
    if ((bp->phy_flags & PHY_SERDES_FLAG) &&
        (CHIP_NUM(bp) == CHIP_NUM_5709))
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
    u32 up1;
    int ret = 1;

    if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
        return 0;

    if (bp->autoneg & AUTONEG_SPEED)
        bp->advertising |= ADVERTISED_2500baseX_Full;

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

    bnx2_read_phy(bp, bp->mii_up1, &up1);
    if (!(up1 & BCM5708S_UP1_2G5)) {
        up1 |= BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, bp->mii_up1, up1);
        ret = 0;
    }

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
    u32 up1;
    int ret = 0;

    if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
        return 0;

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

    bnx2_read_phy(bp, bp->mii_up1, &up1);
    if (up1 & BCM5708S_UP1_2G5) {
        up1 &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, bp->mii_up1, up1);
        ret = 1;
    }

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
    u32 bmcr;

    if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
        return;

    if (CHIP_NUM(bp) == CHIP_NUM_5709) {
        u32 val;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_SERDES_DIG);
        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
        val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
        val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr |= BCM5708S_BMCR_FORCE_2500;
    }

    if (bp->autoneg & AUTONEG_SPEED) {
        bmcr &= ~BMCR_ANENABLE;
        if (bp->req_duplex == DUPLEX_FULL)
            bmcr |= BMCR_FULLDPLX;
    }
    bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
    u32 bmcr;

    if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
        return;

    if (CHIP_NUM(bp) == CHIP_NUM_5709) {
        u32 val;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_SERDES_DIG);
        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
        val &= ~MII_BNX2_SD_MISC1_FORCE;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BCM5708S_BMCR_FORCE_2500;
    }

    if (bp->autoneg & AUTONEG_SPEED)
        bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
    bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
    u32 bmsr;
    u8 link_up;

    if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
        bp->link_up = 1;
        return 0;
    }

    link_up = bp->link_up;

    bnx2_enable_bmsr1(bp);
    bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
    bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
    bnx2_disable_bmsr1(bp);

    if ((bp->phy_flags & PHY_SERDES_FLAG) &&
        (CHIP_NUM(bp) == CHIP_NUM_5706)) {
        u32 val;

        val = REG_RD(bp, BNX2_EMAC_STATUS);
        if (val & BNX2_EMAC_STATUS_LINK)
            bmsr |= BMSR_LSTATUS;
        else
            bmsr &= ~BMSR_LSTATUS;
    }

    if (bmsr & BMSR_LSTATUS) {
        bp->link_up = 1;

        if (bp->phy_flags & PHY_SERDES_FLAG) {
            if (CHIP_NUM(bp) == CHIP_NUM_5706)
                bnx2_5706s_linkup(bp);
            else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                bnx2_5708s_linkup(bp);
            else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_5709s_linkup(bp);
        } else {
            bnx2_copper_linkup(bp);
        }
        bnx2_resolve_flow_ctrl(bp);
    } else {
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (bp->autoneg & AUTONEG_SPEED))
            bnx2_disable_forced_2g5(bp);

        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
        bp->link_up = 0;
    }

    if (bp->link_up != link_up) {
        bnx2_report_link(bp);
    }

    bnx2_set_mac_link(bp);

    return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
    int i;
    u32 reg;

    bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
    for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
        udelay(10);

        bnx2_read_phy(bp, bp->mii_bmcr, &reg);
        if (!(reg & BMCR_RESET)) {
            udelay(20);
            break;
        }
    }
    if (i == PHY_RESET_MAX_WAIT) {
        return -EBUSY;
    }
    return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
    u32 adv = 0;

    if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
        (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

        if (bp->phy_flags & PHY_SERDES_FLAG) {
            adv = ADVERTISE_1000XPAUSE;
        } else {
            adv = ADVERTISE_PAUSE_CAP;
        }
    }
    else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
        if (bp->phy_flags & PHY_SERDES_FLAG) {
            adv = ADVERTISE_1000XPSE_ASYM;
        } else {
            adv = ADVERTISE_PAUSE_ASYM;
        }
    }
    else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
        if (bp->phy_flags & PHY_SERDES_FLAG) {
            adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        } else {
            adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        }
    }
    return adv;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
    u32 adv, bmcr;
    u32 new_adv = 0;

    if (!(bp->autoneg & AUTONEG_SPEED)) {
        u32 new_bmcr;
        int force_link_down = 0;

        if (bp->req_line_speed == SPEED_2500) {
            if (!bnx2_test_and_enable_2g5(bp))
                force_link_down = 1;
        } else if (bp->req_line_speed == SPEED_1000) {
            if (bnx2_test_and_disable_2g5(bp))
                force_link_down = 1;
        }
        bnx2_read_phy(bp, bp->mii_adv, &adv);
        adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        new_bmcr = bmcr & ~BMCR_ANENABLE;
        new_bmcr |= BMCR_SPEED1000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
            if (bp->req_line_speed == SPEED_2500)
                bnx2_enable_forced_2g5(bp);
            else if (bp->req_line_speed == SPEED_1000) {
                bnx2_disable_forced_2g5(bp);
                new_bmcr &= ~0x2000;
            }

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
            if (bp->req_line_speed == SPEED_2500)
                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
            else
                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
        }

        if (bp->req_duplex == DUPLEX_FULL) {
            adv |= ADVERTISE_1000XFULL;
            new_bmcr |= BMCR_FULLDPLX;
        } else {
            adv |= ADVERTISE_1000XHALF;
            new_bmcr &= ~BMCR_FULLDPLX;
        }
        if ((new_bmcr != bmcr) || (force_link_down)) {
            /* Force a link down visible on the other side */
            if (bp->link_up) {
                bnx2_write_phy(bp, bp->mii_adv, adv &
                               ~(ADVERTISE_1000XFULL |
                                 ADVERTISE_1000XHALF));
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                               BMCR_ANRESTART | BMCR_ANENABLE);

                bp->link_up = 0;
                netif_carrier_off(bp->dev);
                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                bnx2_report_link(bp);
            }
            bnx2_write_phy(bp, bp->mii_adv, adv);
            bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
        }
        bnx2_resolve_flow_ctrl(bp);
        bnx2_set_mac_link(bp);
        return 0;
    }

    bnx2_test_and_enable_2g5(bp);

    if (bp->advertising & ADVERTISED_1000baseT_Full)
        new_adv |= ADVERTISE_1000XFULL;

    new_adv |= bnx2_phy_get_pause_adv(bp);

    bnx2_read_phy(bp, bp->mii_adv, &adv);
    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    bp->serdes_an_pending = 0;
    if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
        /* Force a link down visible on the other side */
        if (bp->link_up) {
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
            spin_unlock_bh(&bp->phy_lock);
            msleep(20);
            spin_lock_bh(&bp->phy_lock);
        }

        bnx2_write_phy(bp, bp->mii_adv, new_adv);
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                       BMCR_ANENABLE);
        /* Speed up link-up time when the link partner
         * does not autonegotiate which is very common
         * in blade servers.  Some blade servers use
         * IPMI for keyboard input and it's important
         * to minimize link disruptions.  Autoneg. involves
         * exchanging base pages plus 3 next pages and
         * normally completes in about 120 msec.
         */
        bp->current_interval = SERDES_AN_TIMEOUT;
        bp->serdes_an_pending = 1;
        mod_timer(&bp->timer, jiffies + bp->current_interval);
    } else {
        bnx2_resolve_flow_ctrl(bp);
        bnx2_set_mac_link(bp);
    }

    return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED \
    (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED \
    (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
    ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
    ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
    ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
    u32 bmcr;
    u32 new_bmcr;

    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    if (bp->autoneg & AUTONEG_SPEED) {
        u32 adv_reg, adv1000_reg;
        u32 new_adv_reg = 0;
        u32 new_adv1000_reg = 0;

        bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
        adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                    ADVERTISE_PAUSE_ASYM);

        bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
        adv1000_reg &= PHY_ALL_1000_SPEED;

        if (bp->advertising & ADVERTISED_10baseT_Half)
            new_adv_reg |= ADVERTISE_10HALF;
        if (bp->advertising & ADVERTISED_10baseT_Full)
            new_adv_reg |= ADVERTISE_10FULL;
        if (bp->advertising & ADVERTISED_100baseT_Half)
            new_adv_reg |= ADVERTISE_100HALF;
        if (bp->advertising & ADVERTISED_100baseT_Full)
            new_adv_reg |= ADVERTISE_100FULL;
        if (bp->advertising & ADVERTISED_1000baseT_Full)
            new_adv1000_reg |= ADVERTISE_1000FULL;

        new_adv_reg |= ADVERTISE_CSMA;

        new_adv_reg |= bnx2_phy_get_pause_adv(bp);

        if ((adv1000_reg != new_adv1000_reg) ||
            (adv_reg != new_adv_reg) ||
            ((bmcr & BMCR_ANENABLE) == 0)) {

            bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
            bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                           BMCR_ANENABLE);
        }
        else if (bp->link_up) {
            /* Flow ctrl may have changed from auto to forced */
            /* or vice-versa. */

            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
        return 0;
    }

    new_bmcr = 0;
    if (bp->req_line_speed == SPEED_100) {
        new_bmcr |= BMCR_SPEED100;
    }
    if (bp->req_duplex == DUPLEX_FULL) {
        new_bmcr |= BMCR_FULLDPLX;
    }
    if (new_bmcr != bmcr) {
        u32 bmsr;

        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

        if (bmsr & BMSR_LSTATUS) {
            /* Force link down */
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
            spin_unlock_bh(&bp->phy_lock);
            msleep(50);
            spin_lock_bh(&bp->phy_lock);

            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
        }

        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

        /* Normally, the new speed is setup after the link has
         * gone down and up again.  In some cases, link will not go
         * down so we need to set up the new speed here.
         */
        if (bmsr & BMSR_LSTATUS) {
            bp->line_speed = bp->req_line_speed;
            bp->duplex = bp->req_duplex;
            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
    } else {
        bnx2_resolve_flow_ctrl(bp);
        bnx2_set_mac_link(bp);
    }
    return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp)
{
    if (bp->loopback == MAC_LOOPBACK)
        return 0;

    if (bp->phy_flags & PHY_SERDES_FLAG) {
        return (bnx2_setup_serdes_phy(bp));
    } else {
        return (bnx2_setup_copper_phy(bp));
    }
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
    u32 val;

    bp->mii_bmcr = MII_BMCR + 0x10;
    bp->mii_bmsr = MII_BMSR + 0x10;
    bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
    bp->mii_adv = MII_ADVERTISE + 0x10;
    bp->mii_lpa = MII_LPA + 0x10;
    bp->mii_up1 = MII_BNX2_OVER1G_UP1;

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
    bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
    bnx2_reset_phy(bp);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

    bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
    val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
    val |= MII_BNX2_SD_1000XCTL1_FIBER;
    bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
    bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
    if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
        val |= BCM5708S_UP1_2G5;
    else
        val &= ~BCM5708S_UP1_2G5;
    bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
    bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
    val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
    bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

    val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
          MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
    bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
    u32 val;

    bnx2_reset_phy(bp);

    bp->mii_up1 = BCM5708S_UP1;

    bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
    bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
    bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

    bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
    val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
    bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

    bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
    val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
    bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

    if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
        bnx2_read_phy(bp, BCM5708S_UP1, &val);
        val |= BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, BCM5708S_UP1, val);
    }

    if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
        (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
        (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
        /* increase tx signal amplitude */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                       BCM5708S_BLK_ADDR_TX_MISC);
        bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
        val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
        bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
    }

    val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
          BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

    if (val) {
        u32 is_backplane;

        is_backplane = REG_RD_IND(bp, bp->shmem_base +
                                  BNX2_SHARED_HW_CFG_CONFIG);
        if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
            bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                           BCM5708S_BLK_ADDR_TX_MISC);
            bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
            bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                           BCM5708S_BLK_ADDR_DIG);
        }
    }
    return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
    u32 val;

    bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

    if (CHIP_NUM(bp) == CHIP_NUM_5706)
        REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

    if (bp->dev->mtu > 1500) {
        /* Set extended packet length bit */
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

        bnx2_write_phy(bp, 0x1c, 0x6c00);
        bnx2_read_phy(bp, 0x1c, &val);
        bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
    } else {
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val & ~0x4007);

        bnx2_write_phy(bp, 0x1c, 0x6c00);
        bnx2_read_phy(bp, 0x1c, &val);
        bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
    }

    return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
    u32 val;

    bnx2_reset_phy(bp);

    if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
        bnx2_write_phy(bp, 0x18, 0x0c00);
        bnx2_write_phy(bp, 0x17, 0x000a);
        bnx2_write_phy(bp, 0x15, 0x310b);
        bnx2_write_phy(bp, 0x17, 0x201f);
        bnx2_write_phy(bp, 0x15, 0x9506);
        bnx2_write_phy(bp, 0x17, 0x401f);
        bnx2_write_phy(bp, 0x15, 0x14e2);
        bnx2_write_phy(bp, 0x18, 0x0400);
    }

    if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                       MII_BNX2_DSP_EXPAND_REG | 0x8);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
        val &= ~(1 << 8);
        bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
    }

    if (bp->dev->mtu > 1500) {
        /* Set extended packet length bit */
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | 0x4000);

        bnx2_read_phy(bp, 0x10, &val);
        bnx2_write_phy(bp, 0x10, val | 0x1);
    } else {
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val & ~0x4007);

        bnx2_read_phy(bp, 0x10, &val);
        bnx2_write_phy(bp, 0x10, val & ~0x1);
    }

    /* ethernet@wirespeed */
    bnx2_write_phy(bp, 0x18, 0x7007);
    bnx2_read_phy(bp, 0x18, &val);
    bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
    return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
    u32 val;
    int rc = 0;

    bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
    bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

    bp->mii_bmcr = MII_BMCR;
    bp->mii_bmsr = MII_BMSR;
    bp->mii_bmsr1 = MII_BMSR;
    bp->mii_adv = MII_ADVERTISE;
    bp->mii_lpa = MII_LPA;

    REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

    bnx2_read_phy(bp, MII_PHYSID1, &val);
    bp->phy_id = val << 16;
    bnx2_read_phy(bp, MII_PHYSID2, &val);
    bp->phy_id |= val & 0xffff;

    if (bp->phy_flags & PHY_SERDES_FLAG) {
        if (CHIP_NUM(bp) == CHIP_NUM_5706)
            rc = bnx2_init_5706s_phy(bp);
        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
            rc = bnx2_init_5708s_phy(bp);
        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
            rc = bnx2_init_5709s_phy(bp);
    } else {
        rc = bnx2_init_copper_phy(bp);
    }

    bnx2_setup_phy(bp);

    return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
    u32 mac_mode;

    mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
    mac_mode &= ~BNX2_EMAC_MODE_PORT;
    mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
    REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
    bp->link_up = 1;
    return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
    u32 mac_mode;
    int rc, i;

    spin_lock_bh(&bp->phy_lock);
    rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                        BMCR_SPEED1000);
    spin_unlock_bh(&bp->phy_lock);
    if (rc)
        return rc;

    for (i = 0; i < 10; i++) {
        if (bnx2_test_link(bp) == 0)
            break;
        msleep(100);
    }

    mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
    mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                  BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                  BNX2_EMAC_MODE_25G_MODE);

    mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
    REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
    bp->link_up = 1;
    return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
    int i;
    u32 val;

    bp->fw_wr_seq++;
    msg_data |= bp->fw_wr_seq;

    REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

    /* wait for an acknowledgement. */
    for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
        msleep(10);

        val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

        if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
            break;
    }
    if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
        return 0;

    /* If we timed out, inform the firmware that this is the case. */
    if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
        if (!silent)
            printk(KERN_ERR PFX "fw sync timeout, reset code = "
                   "%x\n", msg_data);

        msg_data &= ~BNX2_DRV_MSG_CODE;
        msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

        return -EBUSY;
    }

    if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
        return -EIO;

    return 0;
}
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
    int i, ret = 0;
    u32 val;

    val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
    val |= (BCM_PAGE_BITS - 8) << 16;
    REG_WR(bp, BNX2_CTX_COMMAND, val);
    for (i = 0; i < bp->ctx_pages; i++) {
        int j;

        REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
               (bp->ctx_blk_mapping[i] & 0xffffffff) |
               BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
        REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
               (u64) bp->ctx_blk_mapping[i] >> 32);
        REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
               BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
        for (j = 0; j < 10; j++) {
            val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
            if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                break;
            udelay(5);
        }
        if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
            ret = -EBUSY;
            break;
        }
    }
    return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
    u32 vcid;

    vcid = 96;
    while (vcid) {
        u32 vcid_addr, pcid_addr, offset;

        vcid--;

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
            u32 new_vcid;

            vcid_addr = GET_PCID_ADDR(vcid);
            if (vcid & 0x8) {
                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
            } else {
                new_vcid = vcid;
            }
            pcid_addr = GET_PCID_ADDR(new_vcid);
        } else {
            vcid_addr = GET_CID_ADDR(vcid);
            pcid_addr = vcid_addr;
        }

        REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

        /* Zero out the context. */
        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
            CTX_WR(bp, 0x00, offset, 0);
        }

        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
    }
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
    u16 *good_mbuf;
    u32 good_mbuf_cnt;
    u32 val;

    good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
    if (good_mbuf == NULL) {
        printk(KERN_ERR PFX "Failed to allocate memory in "
               "bnx2_alloc_bad_rbuf\n");
        return -ENOMEM;
    }

    REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
           BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

    good_mbuf_cnt = 0;

    /* Allocate a bunch of mbufs and save the good ones in an array. */
    val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
    while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
        REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

        val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

        val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

        /* The addresses with Bit 9 set are bad memory blocks. */
        if (!(val & (1 << 9))) {
            good_mbuf[good_mbuf_cnt] = (u16) val;
            good_mbuf_cnt++;
        }

        val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
    }

    /* Free the good ones back to the mbuf pool thus discarding
     * all the bad ones. */
    while (good_mbuf_cnt) {
        good_mbuf_cnt--;

        val = good_mbuf[good_mbuf_cnt];
        val = (val << 9) | val | 1;

        REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
    }

    kfree(good_mbuf);
    return 0;
}
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
    u32 val;
    u8 *mac_addr = bp->dev->dev_addr;

    val = (mac_addr[0] << 8) | mac_addr[1];

    REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

    val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
          (mac_addr[4] << 8) | mac_addr[5];

    REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
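/* Example with an illustrative address 00:10:18:2f:3c:4d: the two writes
 * above become MAC_MATCH0 = 0x00000010 (bytes 0-1) and
 * MAC_MATCH1 = 0x182f3c4d (bytes 2-5).
 */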
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
    struct sk_buff *skb;
    struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
    dma_addr_t mapping;
    struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
    unsigned long align;

    skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
    if (skb == NULL) {
        return -ENOMEM;
    }

    if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
        skb_reserve(skb, BNX2_RX_ALIGN - align);

    mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                             PCI_DMA_FROMDEVICE);

    rx_buf->skb = skb;
    pci_unmap_addr_set(rx_buf, mapping, mapping);

    rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
    rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

    bp->rx_prod_bseq += bp->rx_buf_use_size;

    return 0;
}
static int
bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
{
    struct status_block *sblk = bp->status_blk;
    u32 new_link_state, old_link_state;
    int is_set = 1;

    new_link_state = sblk->status_attn_bits & event;
    old_link_state = sblk->status_attn_bits_ack & event;
    if (new_link_state != old_link_state) {
        if (new_link_state)
            REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
        else
            REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
    } else
        is_set = 0;

    return is_set;
}

static void
bnx2_phy_int(struct bnx2 *bp)
{
    if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
        spin_lock(&bp->phy_lock);
        bnx2_set_link(bp);
        spin_unlock(&bp->phy_lock);
    }
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
    struct status_block *sblk = bp->status_blk;
    u16 hw_cons, sw_cons, sw_ring_cons;
    int tx_free_bd = 0;

    hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
    if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
        hw_cons++;
    }
    sw_cons = bp->tx_cons;

    while (sw_cons != hw_cons) {
        struct sw_bd *tx_buf;
        struct sk_buff *skb;
        int i, last;

        sw_ring_cons = TX_RING_IDX(sw_cons);

        tx_buf = &bp->tx_buf_ring[sw_ring_cons];
        skb = tx_buf->skb;

        /* partial BD completions possible with TSO packets */
        if (skb_is_gso(skb)) {
            u16 last_idx, last_ring_idx;

            last_idx = sw_cons +
                       skb_shinfo(skb)->nr_frags + 1;
            last_ring_idx = sw_ring_cons +
                            skb_shinfo(skb)->nr_frags + 1;
            if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                last_idx++;
            }
            if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                break;
            }
        }

        pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);

        tx_buf->skb = NULL;
        last = skb_shinfo(skb)->nr_frags;

        for (i = 0; i < last; i++) {
            sw_cons = NEXT_TX_BD(sw_cons);

            pci_unmap_page(bp->pdev,
                           pci_unmap_addr(
                               &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
                               mapping),
                           skb_shinfo(skb)->frags[i].size,
                           PCI_DMA_TODEVICE);
        }

        sw_cons = NEXT_TX_BD(sw_cons);

        tx_free_bd += last + 1;

        dev_kfree_skb(skb);

        hw_cons = bp->hw_tx_cons =
                  sblk->status_tx_quick_consumer_index0;

        if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
            hw_cons++;
        }
    }

    bp->tx_cons = sw_cons;
    /* Need to make the tx_cons update visible to bnx2_start_xmit()
     * before checking for netif_queue_stopped().  Without the
     * memory barrier, there is a small possibility that bnx2_start_xmit()
     * will miss it and cause the queue to be stopped forever.
     */
    smp_mb();

    if (unlikely(netif_queue_stopped(bp->dev)) &&
        (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
        netif_tx_lock(bp->dev);
        if ((netif_queue_stopped(bp->dev)) &&
            (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
            netif_wake_queue(bp->dev);
        netif_tx_unlock(bp->dev);
    }
}
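/* The queue-stopped test above is deliberately repeated under
 * netif_tx_lock: the first (lockless) check is a cheap filter, but
 * bnx2_start_xmit() could stop the queue between that check and the
 * wake, so the state is re-validated once the lock serializes us
 * against the transmit path.  Together with the barrier noted above,
 * this prevents a missed wakeup.
 */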
static void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
                  u16 cons, u16 prod)
{
    struct sw_bd *cons_rx_buf, *prod_rx_buf;
    struct rx_bd *cons_bd, *prod_bd;

    cons_rx_buf = &bp->rx_buf_ring[cons];
    prod_rx_buf = &bp->rx_buf_ring[prod];

    pci_dma_sync_single_for_device(bp->pdev,
                                   pci_unmap_addr(cons_rx_buf, mapping),
                                   bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

    bp->rx_prod_bseq += bp->rx_buf_use_size;

    prod_rx_buf->skb = skb;

    if (cons == prod)
        return;

    pci_unmap_addr_set(prod_rx_buf, mapping,
                       pci_unmap_addr(cons_rx_buf, mapping));

    cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
    prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
    prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
    prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
    struct status_block *sblk = bp->status_blk;
    u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
    struct l2_fhdr *rx_hdr;
    int rx_pkt = 0;

    hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
    if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
        hw_cons++;
    }
    sw_cons = bp->rx_cons;
    sw_prod = bp->rx_prod;

    /* Memory barrier necessary as speculative reads of the rx
     * buffer can be ahead of the index in the status block
     */
    rmb();
    while (sw_cons != hw_cons) {
        unsigned int len;
        u32 status;
        struct sw_bd *rx_buf;
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        sw_ring_cons = RX_RING_IDX(sw_cons);
        sw_ring_prod = RX_RING_IDX(sw_prod);

        rx_buf = &bp->rx_buf_ring[sw_ring_cons];

        skb = rx_buf->skb;
        rx_buf->skb = NULL;

        dma_addr = pci_unmap_addr(rx_buf, mapping);

        pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                                    bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rx_hdr = (struct l2_fhdr *) skb->data;
        len = rx_hdr->l2_fhdr_pkt_len - 4;

        if ((status = rx_hdr->l2_fhdr_status) &
            (L2_FHDR_ERRORS_BAD_CRC |
             L2_FHDR_ERRORS_PHY_DECODE |
             L2_FHDR_ERRORS_ALIGNMENT |
             L2_FHDR_ERRORS_TOO_SHORT |
             L2_FHDR_ERRORS_GIANT_FRAME)) {

            goto reuse_rx;
        }

        /* Since we don't have a jumbo ring, copy small packets
         * if mtu > 1500.
         */
        if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
            struct sk_buff *new_skb;

            new_skb = netdev_alloc_skb(bp->dev, len + 2);
            if (new_skb == NULL)
                goto reuse_rx;

            skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
                                             new_skb->data, len + 2);
            skb_reserve(new_skb, 2);
            skb_put(new_skb, len);

            bnx2_reuse_rx_skb(bp, skb,
                              sw_ring_cons, sw_ring_prod);

            skb = new_skb;
        }
        else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
            pci_unmap_single(bp->pdev, dma_addr,
                             bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

            skb_reserve(skb, bp->rx_offset);
            skb_put(skb, len);
        }
        else {
reuse_rx:
            bnx2_reuse_rx_skb(bp, skb,
                              sw_ring_cons, sw_ring_prod);
            goto next_rx;
        }

        skb->protocol = eth_type_trans(skb, bp->dev);

        if ((len > (bp->dev->mtu + ETH_HLEN)) &&
            (ntohs(skb->protocol) != 0x8100)) {

            dev_kfree_skb(skb);
            goto next_rx;
        }

        skb->ip_summed = CHECKSUM_NONE;
        if (bp->rx_csum &&
            (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                       L2_FHDR_STATUS_UDP_DATAGRAM))) {

            if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                  L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
            vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                     rx_hdr->l2_fhdr_vlan_tag);
        }
        else
            netif_receive_skb(skb);

        bp->dev->last_rx = jiffies;
        rx_pkt++;

next_rx:
        sw_cons = NEXT_RX_BD(sw_cons);
        sw_prod = NEXT_RX_BD(sw_prod);

        if (rx_pkt == budget)
            break;

        /* Refresh hw_cons to see if there is new work */
        if (sw_cons == hw_cons) {
            hw_cons = bp->hw_rx_cons =
                      sblk->status_rx_quick_consumer_index0;
            if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
                hw_cons++;
            rmb();
        }
    }
    bp->rx_cons = sw_cons;
    bp->rx_prod = sw_prod;
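    /* Tell the chip about the new producer position: the 16-bit host BD
     * index below advances the rx producer, and the byte sequence number
     * (rx_prod_bseq, incremented by rx_buf_use_size for each posted
     * buffer) effectively tells the hardware how many bytes of receive
     * buffer space are available.
     */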
    REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

    REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

    return rx_pkt;
}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct bnx2 *bp = netdev_priv(dev);

    prefetch(bp->status_blk);
    REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
           BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
           BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

    /* Return here if interrupt is disabled. */
    if (unlikely(atomic_read(&bp->intr_sem) != 0))
        return IRQ_HANDLED;

    netif_rx_schedule(dev);

    return IRQ_HANDLED;
}

static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct bnx2 *bp = netdev_priv(dev);

    prefetch(bp->status_blk);

    /* Return here if interrupt is disabled. */
    if (unlikely(atomic_read(&bp->intr_sem) != 0))
        return IRQ_HANDLED;

    netif_rx_schedule(dev);

    return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct bnx2 *bp = netdev_priv(dev);

    /* When using INTx, it is possible for the interrupt to arrive
     * at the CPU before the status block posted prior to the
     * interrupt.  Reading a register will flush the status block.
     * When using MSI, the MSI message will always complete after
     * the status block write.
     */
    if ((bp->status_blk->status_idx == bp->last_status_idx) &&
        (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
         BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
        return IRQ_NONE;

    REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
           BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
           BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

    /* Return here if interrupt is shared and is disabled. */
    if (unlikely(atomic_read(&bp->intr_sem) != 0))
        return IRQ_HANDLED;

    netif_rx_schedule(dev);

    return IRQ_HANDLED;
}
#define STATUS_ATTN_EVENTS	STATUS_ATTN_BITS_LINK_STATE

static inline int
bnx2_has_work(struct bnx2 *bp)
{
    struct status_block *sblk = bp->status_blk;

    if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
        (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
        return 1;

    if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
        (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
        return 1;

    return 0;
}
static int
bnx2_poll(struct net_device *dev, int *budget)
{
    struct bnx2 *bp = netdev_priv(dev);
    struct status_block *sblk = bp->status_blk;
    u32 status_attn_bits = sblk->status_attn_bits;
    u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

    if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
        (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

        bnx2_phy_int(bp);

        /* This is needed to take care of transient status
         * during link changes.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
        REG_RD(bp, BNX2_HC_COMMAND);
    }

    if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
        bnx2_tx_int(bp);

    if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
        int orig_budget = *budget;
        int work_done;

        if (orig_budget > dev->quota)
            orig_budget = dev->quota;

        work_done = bnx2_rx_int(bp, orig_budget);
        *budget -= work_done;
        dev->quota -= work_done;
    }

    bp->last_status_idx = bp->status_blk->status_idx;
    rmb();

    if (!bnx2_has_work(bp)) {
        netif_rx_complete(dev);
        if (likely(bp->flags & USING_MSI_FLAG)) {
            REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                   BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                   bp->last_status_idx);
            return 0;
        }
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
               bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               bp->last_status_idx);
        return 0;
    }

    return 1;
}
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
    struct bnx2 *bp = netdev_priv(dev);
    u32 rx_mode, sort_mode;
    int i;

    spin_lock_bh(&bp->phy_lock);

    rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                              BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
    sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
    if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
        rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
    if (!(bp->flags & ASF_ENABLE_FLAG))
        rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
    if (dev->flags & IFF_PROMISC) {
        /* Promiscuous mode. */
        rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
        sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                     BNX2_RPM_SORT_USER0_PROM_VLAN;
    }
    else if (dev->flags & IFF_ALLMULTI) {
        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
            REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                   0xffffffff);
        }
        sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
    }
    else {
        /* Accept one or more multicast(s). */
        struct dev_mc_list *mclist;
        u32 mc_filter[NUM_MC_HASH_REGISTERS];
        u32 regidx;
        u32 bit;
        u32 crc;

        memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

        for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
             i++, mclist = mclist->next) {

            crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
            bit = crc & 0xff;
            regidx = (bit & 0xe0) >> 5;
            bit &= 0x1f;
            mc_filter[regidx] |= (1 << bit);
        }
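        /* Example (illustrative value): if ether_crc_le() returns
         * 0x00000047, then bit = 0x47, regidx = (0x47 & 0xe0) >> 5 = 2
         * and bit &= 0x1f gives 7, so bit 7 of MULTICAST_HASH2 is set.
         * The hash registers together provide 32-bit-per-register
         * buckets indexed by the low byte of the CRC.
         */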
        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
            REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                   mc_filter[i]);
        }

        sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
    }

    if (rx_mode != bp->rx_mode) {
        bp->rx_mode = rx_mode;
        REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
    }

    REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
    REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
    REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

    spin_unlock_bh(&bp->phy_lock);
}
#define FW_BUF_SIZE	0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
    if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
        goto gunzip_nomem1;

    if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
        goto gunzip_nomem2;

    bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
    if (bp->strm->workspace == NULL)
        goto gunzip_nomem3;

    return 0;

gunzip_nomem3:
    kfree(bp->strm);
    bp->strm = NULL;

gunzip_nomem2:
    vfree(bp->gunzip_buf);
    bp->gunzip_buf = NULL;

gunzip_nomem1:
    printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
           "uncompression.\n", bp->dev->name);
    return -ENOMEM;
}

static void
bnx2_gunzip_end(struct bnx2 *bp)
{
    kfree(bp->strm->workspace);

    kfree(bp->strm);
    bp->strm = NULL;

    if (bp->gunzip_buf) {
        vfree(bp->gunzip_buf);
        bp->gunzip_buf = NULL;
    }
}
2511 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2515 /* check gzip header */
2516 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2522 if (zbuf[3] & FNAME)
2523 while ((zbuf[n++] != 0) && (n < len));
2525 bp->strm->next_in = zbuf + n;
2526 bp->strm->avail_in = len - n;
2527 bp->strm->next_out = bp->gunzip_buf;
2528 bp->strm->avail_out = FW_BUF_SIZE;
2530 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2534 rc = zlib_inflate(bp->strm, Z_FINISH);
2536 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2537 *outbuf = bp->gunzip_buf;
2539 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2540 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2541 bp->dev->name, bp->strm->msg);
2543 zlib_inflateEnd(bp->strm);
2545 if (rc == Z_STREAM_END)
2546 return 0;
2548 return rc;
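/* Hedged sketch (not called by the driver) of the RFC 1952 header walk
 * in bnx2_gunzip(): a gzip member starts with the magic bytes 0x1f 0x8b
 * and method 8 (deflate); the fixed header is 10 bytes, optionally
 * followed by a NUL-terminated file name when the FNAME flag is set.
 * Only then does the raw deflate stream begin, which is why
 * zlib_inflateInit2() is passed -MAX_WBITS (no zlib wrapper expected).
 */
static int
bnx2_example_gzip_payload_offset(const u8 *zbuf, int len)
{
	int n = 10;			/* fixed gzip header length */

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b ||
	    zbuf[2] != Z_DEFLATED)
		return -EINVAL;
	if (zbuf[3] & FNAME)		/* skip the stored file name */
		while (n < len && zbuf[n++] != 0)
			;
	return n;			/* offset of the deflate payload */
}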
2552 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2559 for (i = 0; i < rv2p_code_len; i += 8) {
2560 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2561 rv2p_code++;
2562 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2563 rv2p_code++;
2565 if (rv2p_proc == RV2P_PROC1) {
2566 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2567 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2570 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2571 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2575 /* Reset the processor, un-stall is done later. */
2576 if (rv2p_proc == RV2P_PROC1) {
2577 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2580 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2585 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2592 val = REG_RD_IND(bp, cpu_reg->mode);
2593 val |= cpu_reg->mode_value_halt;
2594 REG_WR_IND(bp, cpu_reg->mode, val);
2595 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2597 /* Load the Text area. */
2598 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2603 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2613 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2614 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2618 /* Load the Data area. */
2619 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2623 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2624 REG_WR_IND(bp, offset, fw->data[j]);
2628 /* Load the SBSS area. */
2629 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2633 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2634 REG_WR_IND(bp, offset, fw->sbss[j]);
2638 /* Load the BSS area. */
2639 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2643 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2644 REG_WR_IND(bp, offset, fw->bss[j]);
2648 /* Load the Read-Only area. */
2649 offset = cpu_reg->spad_base +
2650 (fw->rodata_addr - cpu_reg->mips_view_base);
2654 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2655 REG_WR_IND(bp, offset, fw->rodata[j]);
2659 /* Clear the pre-fetch instruction. */
2660 REG_WR_IND(bp, cpu_reg->inst, 0);
2661 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2663 /* Start the CPU. */
2664 val = REG_RD_IND(bp, cpu_reg->mode);
2665 val &= ~cpu_reg->mode_value_halt;
2666 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2667 REG_WR_IND(bp, cpu_reg->mode, val);
2673 bnx2_init_cpus(struct bnx2 *bp)
2675 struct cpu_reg cpu_reg;
2681 if ((rc = bnx2_gunzip_init(bp)) != 0)
2684 /* Initialize the RV2P processor. */
2685 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2690 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2692 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2697 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2699 /* Initialize the RX Processor. */
2700 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2701 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2702 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2703 cpu_reg.state = BNX2_RXP_CPU_STATE;
2704 cpu_reg.state_value_clear = 0xffffff;
2705 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2706 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2707 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2708 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2709 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2710 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2711 cpu_reg.mips_view_base = 0x8000000;
2713 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2714 fw = &bnx2_rxp_fw_09;
2715 else
2716 fw = &bnx2_rxp_fw_06;
2718 rc = load_cpu_fw(bp, &cpu_reg, fw);
2722 /* Initialize the TX Processor. */
2723 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2724 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2725 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2726 cpu_reg.state = BNX2_TXP_CPU_STATE;
2727 cpu_reg.state_value_clear = 0xffffff;
2728 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2729 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2730 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2731 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2732 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2733 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2734 cpu_reg.mips_view_base = 0x8000000;
2736 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2737 fw = &bnx2_txp_fw_09;
2738 else
2739 fw = &bnx2_txp_fw_06;
2741 rc = load_cpu_fw(bp, &cpu_reg, fw);
2745 /* Initialize the TX Patch-up Processor. */
2746 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2747 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2748 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2749 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2750 cpu_reg.state_value_clear = 0xffffff;
2751 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2752 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2753 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2754 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2755 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2756 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2757 cpu_reg.mips_view_base = 0x8000000;
2759 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2760 fw = &bnx2_tpat_fw_09;
2761 else
2762 fw = &bnx2_tpat_fw_06;
2764 rc = load_cpu_fw(bp, &cpu_reg, fw);
2768 /* Initialize the Completion Processor. */
2769 cpu_reg.mode = BNX2_COM_CPU_MODE;
2770 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2771 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2772 cpu_reg.state = BNX2_COM_CPU_STATE;
2773 cpu_reg.state_value_clear = 0xffffff;
2774 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2775 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2776 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2777 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2778 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2779 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2780 cpu_reg.mips_view_base = 0x8000000;
2782 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2783 fw = &bnx2_com_fw_09;
2784 else
2785 fw = &bnx2_com_fw_06;
2787 rc = load_cpu_fw(bp, &cpu_reg, fw);
2791 /* Initialize the Command Processor. */
2792 cpu_reg.mode = BNX2_CP_CPU_MODE;
2793 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2794 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2795 cpu_reg.state = BNX2_CP_CPU_STATE;
2796 cpu_reg.state_value_clear = 0xffffff;
2797 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2798 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2799 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2800 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2801 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2802 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2803 cpu_reg.mips_view_base = 0x8000000;
2805 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2806 fw = &bnx2_cp_fw_09;
2808 rc = load_cpu_fw(bp, &cpu_reg, fw);
2813 bnx2_gunzip_end(bp);
2818 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2822 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2828 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2829 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2830 PCI_PM_CTRL_PME_STATUS);
2832 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2833 /* delay required during transition out of D3hot */
2836 val = REG_RD(bp, BNX2_EMAC_MODE);
2837 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2838 val &= ~BNX2_EMAC_MODE_MPKT;
2839 REG_WR(bp, BNX2_EMAC_MODE, val);
2841 val = REG_RD(bp, BNX2_RPM_CONFIG);
2842 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2843 REG_WR(bp, BNX2_RPM_CONFIG, val);
2854 autoneg = bp->autoneg;
2855 advertising = bp->advertising;
2857 bp->autoneg = AUTONEG_SPEED;
2858 bp->advertising = ADVERTISED_10baseT_Half |
2859 ADVERTISED_10baseT_Full |
2860 ADVERTISED_100baseT_Half |
2861 ADVERTISED_100baseT_Full |
2862 ADVERTISED_Autoneg;
2864 bnx2_setup_copper_phy(bp);
2866 bp->autoneg = autoneg;
2867 bp->advertising = advertising;
2869 bnx2_set_mac_addr(bp);
2871 val = REG_RD(bp, BNX2_EMAC_MODE);
2873 /* Enable port mode. */
2874 val &= ~BNX2_EMAC_MODE_PORT;
2875 val |= BNX2_EMAC_MODE_PORT_MII |
2876 BNX2_EMAC_MODE_MPKT_RCVD |
2877 BNX2_EMAC_MODE_ACPI_RCVD |
2878 BNX2_EMAC_MODE_MPKT;
2880 REG_WR(bp, BNX2_EMAC_MODE, val);
2882 /* receive all multicast */
2883 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2884 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2885 0xffffffff);
2887 REG_WR(bp, BNX2_EMAC_RX_MODE,
2888 BNX2_EMAC_RX_MODE_SORT_MODE);
2890 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2891 BNX2_RPM_SORT_USER0_MC_EN;
2892 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2893 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2894 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2895 BNX2_RPM_SORT_USER0_ENA);
2897 /* Need to enable EMAC and RPM for WOL. */
2898 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2899 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2900 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2901 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2903 val = REG_RD(bp, BNX2_RPM_CONFIG);
2904 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2905 REG_WR(bp, BNX2_RPM_CONFIG, val);
2907 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2910 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2913 if (!(bp->flags & NO_WOL_FLAG))
2914 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2916 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2917 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2918 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2927 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2929 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2932 /* No more memory access after this point until
2933 * device is brought back to D0.
2945 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2950 /* Request access to the flash interface. */
2951 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2952 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2953 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2954 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2960 if (j >= NVRAM_TIMEOUT_COUNT)
2961 return -EBUSY;
2967 bnx2_release_nvram_lock(struct bnx2 *bp)
2972 /* Relinquish nvram interface. */
2973 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2975 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2976 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2977 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2983 if (j >= NVRAM_TIMEOUT_COUNT)
2984 return -EBUSY;
2991 bnx2_enable_nvram_write(struct bnx2 *bp)
2995 val = REG_RD(bp, BNX2_MISC_CFG);
2996 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2998 if (!bp->flash_info->buffered) {
3001 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3002 REG_WR(bp, BNX2_NVM_COMMAND,
3003 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3005 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3008 val = REG_RD(bp, BNX2_NVM_COMMAND);
3009 if (val & BNX2_NVM_COMMAND_DONE)
3013 if (j >= NVRAM_TIMEOUT_COUNT)
3014 return -EBUSY;
3020 bnx2_disable_nvram_write(struct bnx2 *bp)
3024 val = REG_RD(bp, BNX2_MISC_CFG);
3025 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3030 bnx2_enable_nvram_access(struct bnx2 *bp)
3034 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3035 /* Enable both bits, even on read. */
3036 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3037 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3041 bnx2_disable_nvram_access(struct bnx2 *bp)
3045 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3046 /* Disable both bits, even after read. */
3047 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3048 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3049 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3053 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3058 if (bp->flash_info->buffered)
3059 /* Buffered flash, no erase needed */
3062 /* Build an erase command */
3063 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3064 BNX2_NVM_COMMAND_DOIT;
3066 /* Need to clear DONE bit separately. */
3067 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3069 /* Address of the NVRAM page to erase. */
3070 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3072 /* Issue an erase command. */
3073 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3075 /* Wait for completion. */
3076 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3081 val = REG_RD(bp, BNX2_NVM_COMMAND);
3082 if (val & BNX2_NVM_COMMAND_DONE)
3086 if (j >= NVRAM_TIMEOUT_COUNT)
3087 return -EBUSY;
3093 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3098 /* Build the command word. */
3099 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3101 /* Translate the offset into a buffered-flash page address. */
3102 if (bp->flash_info->buffered) {
3103 offset = ((offset / bp->flash_info->page_size) <<
3104 bp->flash_info->page_bits) +
3105 (offset % bp->flash_info->page_size);
3108 /* Need to clear DONE bit separately. */
3109 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3111 /* Address of the NVRAM to read from. */
3112 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3114 /* Issue a read command. */
3115 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3117 /* Wait for completion. */
3118 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3123 val = REG_RD(bp, BNX2_NVM_COMMAND);
3124 if (val & BNX2_NVM_COMMAND_DONE) {
3125 val = REG_RD(bp, BNX2_NVM_READ);
3127 val = be32_to_cpu(val);
3128 memcpy(ret_val, &val, 4);
3132 if (j >= NVRAM_TIMEOUT_COUNT)
3133 return -EBUSY;
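/* Hedged sketch of the buffered-flash address translation used by the
 * dword helpers above: a linear byte offset is split into a page number
 * and a byte within the page, and the page number is shifted up into
 * the device's page-address bits.
 */
static inline u32
bnx2_example_buffered_nvram_addr(struct bnx2 *bp, u32 offset)
{
	struct flash_spec *flash = bp->flash_info;

	return ((offset / flash->page_size) << flash->page_bits) +
	       (offset % flash->page_size);
}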
3140 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3145 /* Build the command word. */
3146 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3148 /* Translate the offset into a buffered-flash page address. */
3149 if (bp->flash_info->buffered) {
3150 offset = ((offset / bp->flash_info->page_size) <<
3151 bp->flash_info->page_bits) +
3152 (offset % bp->flash_info->page_size);
3155 /* Need to clear DONE bit separately. */
3156 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3158 memcpy(&val32, val, 4);
3159 val32 = cpu_to_be32(val32);
3161 /* Write the data. */
3162 REG_WR(bp, BNX2_NVM_WRITE, val32);
3164 /* Address of the NVRAM to write to. */
3165 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3167 /* Issue the write command. */
3168 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3170 /* Wait for completion. */
3171 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3174 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3177 if (j >= NVRAM_TIMEOUT_COUNT)
3178 return -EBUSY;
3184 bnx2_init_nvram(struct bnx2 *bp)
3187 int j, entry_count, rc;
3188 struct flash_spec *flash;
3190 /* Determine the selected interface. */
3191 val = REG_RD(bp, BNX2_NVM_CFG1);
3193 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3196 if (val & 0x40000000) {
3198 /* Flash interface has been reconfigured */
3199 for (j = 0, flash = &flash_table[0]; j < entry_count;
3201 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3202 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3203 bp->flash_info = flash;
3210 /* Not yet reconfigured */
3212 if (val & (1 << 23))
3213 mask = FLASH_BACKUP_STRAP_MASK;
3215 mask = FLASH_STRAP_MASK;
3217 for (j = 0, flash = &flash_table[0]; j < entry_count;
3220 if ((val & mask) == (flash->strapping & mask)) {
3221 bp->flash_info = flash;
3223 /* Request access to the flash interface. */
3224 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3227 /* Enable access to flash interface */
3228 bnx2_enable_nvram_access(bp);
3230 /* Reconfigure the flash interface */
3231 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3232 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3233 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3234 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3236 /* Disable access to flash interface */
3237 bnx2_disable_nvram_access(bp);
3238 bnx2_release_nvram_lock(bp);
3243 } /* if (val & 0x40000000) */
3245 if (j == entry_count) {
3246 bp->flash_info = NULL;
3247 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3251 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3252 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3254 bp->flash_size = val;
3256 bp->flash_size = bp->flash_info->total_size;
3262 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3266 u32 cmd_flags, offset32, len32, extra;
3271 /* Request access to the flash interface. */
3272 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3275 /* Enable access to flash interface */
3276 bnx2_enable_nvram_access(bp);
3289 pre_len = 4 - (offset & 3);
3291 if (pre_len >= len32) {
3293 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3294 BNX2_NVM_COMMAND_LAST;
3297 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3300 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3305 memcpy(ret_buf, buf + (offset & 3), pre_len);
3312 extra = 4 - (len32 & 3);
3313 len32 = (len32 + 4) & ~3;
3320 cmd_flags = BNX2_NVM_COMMAND_LAST;
3322 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3323 BNX2_NVM_COMMAND_LAST;
3325 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3327 memcpy(ret_buf, buf, 4 - extra);
3329 else if (len32 > 0) {
3332 /* Read the first word. */
3336 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3338 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3340 /* Advance to the next dword. */
3345 while (len32 > 4 && rc == 0) {
3346 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3348 /* Advance to the next dword. */
3357 cmd_flags = BNX2_NVM_COMMAND_LAST;
3358 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3360 memcpy(ret_buf, buf, 4 - extra);
3363 /* Disable access to flash interface */
3364 bnx2_disable_nvram_access(bp);
3366 bnx2_release_nvram_lock(bp);
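/* Usage sketch (the offset and length here are hypothetical): callers
 * of bnx2_nvram_read() may pass any byte offset and length.  The
 * unaligned head (pre_len) and tail (extra) are absorbed internally by
 * dword reads, so no alignment is required of the caller.
 */
static int
bnx2_example_nvram_read_unaligned(struct bnx2 *bp, u8 *buf)
{
	return bnx2_nvram_read(bp, 0x53, buf, 6);	/* hypothetical range */
}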
3372 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3375 u32 written, offset32, len32;
3376 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3378 int align_start, align_end;
3383 align_start = align_end = 0;
3385 if ((align_start = (offset32 & 3))) {
3386 offset32 &= ~3;
3387 len32 += align_start;
3390 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3395 align_end = 4 - (len32 & 3);
3396 len32 += align_end;
3397 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3401 if (align_start || align_end) {
3402 align_buf = kmalloc(len32, GFP_KERNEL);
3403 if (align_buf == NULL)
3404 return -ENOMEM;
3406 memcpy(align_buf, start, 4);
3409 memcpy(align_buf + len32 - 4, end, 4);
3411 memcpy(align_buf + align_start, data_buf, buf_size);
3415 if (bp->flash_info->buffered == 0) {
3416 flash_buffer = kmalloc(264, GFP_KERNEL);
3417 if (flash_buffer == NULL) {
3418 rc = -ENOMEM;
3419 goto nvram_write_end;
3424 while ((written < len32) && (rc == 0)) {
3425 u32 page_start, page_end, data_start, data_end;
3426 u32 addr, cmd_flags;
3429 /* Find the page_start addr */
3430 page_start = offset32 + written;
3431 page_start -= (page_start % bp->flash_info->page_size);
3432 /* Find the page_end addr */
3433 page_end = page_start + bp->flash_info->page_size;
3434 /* Find the data_start addr */
3435 data_start = (written == 0) ? offset32 : page_start;
3436 /* Find the data_end addr */
3437 data_end = (page_end > offset32 + len32) ?
3438 (offset32 + len32) : page_end;
3440 /* Request access to the flash interface. */
3441 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3442 goto nvram_write_end;
3444 /* Enable access to flash interface */
3445 bnx2_enable_nvram_access(bp);
3447 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3448 if (bp->flash_info->buffered == 0) {
3451 /* Read the whole page into the buffer
3452 * (non-buffered flash only) */
3453 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3454 if (j == (bp->flash_info->page_size - 4)) {
3455 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3457 rc = bnx2_nvram_read_dword(bp,
3463 goto nvram_write_end;
3469 /* Enable writes to flash interface (unlock write-protect) */
3470 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3471 goto nvram_write_end;
3473 /* Loop to write back the buffer data from page_start to data_start. */
3476 if (bp->flash_info->buffered == 0) {
3477 /* Erase the page */
3478 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3479 goto nvram_write_end;
3481 /* Re-enable write access for the actual write. */
3482 bnx2_enable_nvram_write(bp);
3484 for (addr = page_start; addr < data_start;
3485 addr += 4, i += 4) {
3487 rc = bnx2_nvram_write_dword(bp, addr,
3488 &flash_buffer[i], cmd_flags);
3491 goto nvram_write_end;
3497 /* Loop to write the new data from data_start to data_end */
3498 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3499 if ((addr == page_end - 4) ||
3500 ((bp->flash_info->buffered) &&
3501 (addr == data_end - 4))) {
3503 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3505 rc = bnx2_nvram_write_dword(bp, addr, buf,
3509 goto nvram_write_end;
3515 /* Loop to write back the buffer data from data_end to page_end. */
3517 if (bp->flash_info->buffered == 0) {
3518 for (addr = data_end; addr < page_end;
3519 addr += 4, i += 4) {
3521 if (addr == page_end - 4) {
3522 cmd_flags = BNX2_NVM_COMMAND_LAST;
3524 rc = bnx2_nvram_write_dword(bp, addr,
3525 &flash_buffer[i], cmd_flags);
3528 goto nvram_write_end;
3534 /* Disable writes to flash interface (lock write-protect) */
3535 bnx2_disable_nvram_write(bp);
3537 /* Disable access to flash interface */
3538 bnx2_disable_nvram_access(bp);
3539 bnx2_release_nvram_lock(bp);
3541 /* Increment written */
3542 written += data_end - data_start;
3546 kfree(flash_buffer);
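/* Hedged sketch of the per-iteration page math in the loop above: each
 * pass covers the overlap of [offset32, offset32 + len32) with exactly
 * one flash page, which is why non-buffered parts must read, erase and
 * rewrite the untouched bytes at both ends of a partially written page.
 */
static void
bnx2_example_page_window(u32 offset32, u32 len32, u32 written,
			 u32 page_size, u32 *data_start, u32 *data_end)
{
	u32 page_start = offset32 + written;
	u32 page_end;

	page_start -= (page_start % page_size);
	page_end = page_start + page_size;

	*data_start = (written == 0) ? offset32 : page_start;
	*data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;
}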
3552 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3557 /* Wait for the current PCI transaction to complete before
3558 * issuing a reset. */
3559 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3560 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3561 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3562 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3563 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3564 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3567 /* Wait for the firmware to tell us it is ok to issue a reset. */
3568 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3570 /* Deposit a driver reset signature so the firmware knows that
3571 * this is a soft reset. */
3572 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3573 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3575 /* Do a dummy read to force the chip to complete all current transactions
3576 * before we issue a reset. */
3577 val = REG_RD(bp, BNX2_MISC_ID);
3579 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3580 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3581 REG_RD(bp, BNX2_MISC_COMMAND);
3584 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3585 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3587 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3590 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3591 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3592 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3595 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3597 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3598 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3599 current->state = TASK_UNINTERRUPTIBLE;
3600 schedule_timeout(HZ / 50);
3603 /* Reset takes approximately 30 usec */
3604 for (i = 0; i < 10; i++) {
3605 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3606 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3607 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3612 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3613 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3614 printk(KERN_ERR PFX "Chip reset did not complete\n");
3619 /* Make sure byte swapping is properly configured. */
3620 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3621 if (val != 0x01020304) {
3622 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3626 /* Wait for the firmware to finish its initialization. */
3627 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3631 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3632 /* Adjust the voltage regulator two steps lower. The default
3633 * of this register is 0x0000000e. */
3634 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3636 /* Remove bad rbuf memory from the free pool. */
3637 rc = bnx2_alloc_bad_rbuf(bp);
3644 bnx2_init_chip(struct bnx2 *bp)
3649 /* Make sure the interrupt is not active. */
3650 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3652 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3653 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3655 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3657 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3658 DMA_READ_CHANS << 12 |
3659 DMA_WRITE_CHANS << 16;
3661 val |= (0x2 << 20) | (1 << 11);
3663 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3666 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3667 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3668 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3670 REG_WR(bp, BNX2_DMA_CONFIG, val);
3672 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3673 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3674 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3675 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3678 if (bp->flags & PCIX_FLAG) {
3681 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3683 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3684 val16 & ~PCI_X_CMD_ERO);
3687 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3688 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3689 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3690 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3692 /* Initialize context mapping and zero out the quick contexts. The
3693 * context block must have already been enabled. */
3694 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3695 bnx2_init_5709_context(bp);
3696 else
3697 bnx2_init_context(bp);
3699 if ((rc = bnx2_init_cpus(bp)) != 0)
3702 bnx2_init_nvram(bp);
3704 bnx2_set_mac_addr(bp);
3706 val = REG_RD(bp, BNX2_MQ_CONFIG);
3707 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3708 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3709 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3710 val |= BNX2_MQ_CONFIG_HALT_DIS;
3712 REG_WR(bp, BNX2_MQ_CONFIG, val);
3714 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3715 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3716 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3718 val = (BCM_PAGE_BITS - 8) << 24;
3719 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3721 /* Configure page size. */
3722 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3723 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3724 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3725 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3727 val = bp->mac_addr[0] +
3728 (bp->mac_addr[1] << 8) +
3729 (bp->mac_addr[2] << 16) +
3731 (bp->mac_addr[4] << 8) +
3732 (bp->mac_addr[5] << 16);
3733 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3735 /* Program the MTU. Also include 4 bytes for CRC32. */
3736 val = bp->dev->mtu + ETH_HLEN + 4;
3737 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3738 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3739 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3741 bp->last_status_idx = 0;
3742 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3744 /* Set up how to generate a link change interrupt. */
3745 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3747 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3748 (u64) bp->status_blk_mapping & 0xffffffff);
3749 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3751 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3752 (u64) bp->stats_blk_mapping & 0xffffffff);
3753 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3754 (u64) bp->stats_blk_mapping >> 32);
3756 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3757 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3759 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3760 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3762 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3763 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3765 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3767 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3769 REG_WR(bp, BNX2_HC_COM_TICKS,
3770 (bp->com_ticks_int << 16) | bp->com_ticks);
3772 REG_WR(bp, BNX2_HC_CMD_TICKS,
3773 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3775 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3776 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3778 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3779 val = BNX2_HC_CONFIG_COLLECT_STATS;
3780 else
3781 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
3782 BNX2_HC_CONFIG_COLLECT_STATS;
3785 if (bp->flags & ONE_SHOT_MSI_FLAG)
3786 val |= BNX2_HC_CONFIG_ONE_SHOT;
3788 REG_WR(bp, BNX2_HC_CONFIG, val);
3790 /* Clear internal stats counters. */
3791 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3793 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
3795 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3796 BNX2_PORT_FEATURE_ASF_ENABLED)
3797 bp->flags |= ASF_ENABLE_FLAG;
3799 /* Initialize the receive filter. */
3800 bnx2_set_rx_mode(bp->dev);
3802 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3805 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3806 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3810 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3816 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3818 u32 val, offset0, offset1, offset2, offset3;
3820 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3821 offset0 = BNX2_L2CTX_TYPE_XI;
3822 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3823 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3824 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3826 offset0 = BNX2_L2CTX_TYPE;
3827 offset1 = BNX2_L2CTX_CMD_TYPE;
3828 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3829 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3831 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3832 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3834 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3835 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3837 val = (u64) bp->tx_desc_mapping >> 32;
3838 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3840 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3841 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3845 bnx2_init_tx_ring(struct bnx2 *bp)
3850 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3852 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3854 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3855 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3860 bp->tx_prod_bseq = 0;
3863 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3864 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3866 bnx2_init_tx_context(bp, cid);
3870 bnx2_init_rx_ring(struct bnx2 *bp)
3874 u16 prod, ring_prod;
3877 /* 8 for CRC and VLAN */
3878 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3880 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3882 ring_prod = prod = bp->rx_prod = 0;
3885 bp->rx_prod_bseq = 0;
3887 for (i = 0; i < bp->rx_max_ring; i++) {
3890 rxbd = &bp->rx_desc_ring[i][0];
3891 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3892 rxbd->rx_bd_len = bp->rx_buf_use_size;
3893 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3895 if (i == (bp->rx_max_ring - 1))
3899 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3900 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3904 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3905 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3907 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3909 val = (u64) bp->rx_desc_mapping[0] >> 32;
3910 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3912 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3913 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3915 for (i = 0; i < bp->rx_ring_size; i++) {
3916 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3919 prod = NEXT_RX_BD(prod);
3920 ring_prod = RX_RING_IDX(prod);
3924 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3926 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3930 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3934 bp->rx_ring_size = size;
3936 while (size > MAX_RX_DESC_CNT) {
3937 size -= MAX_RX_DESC_CNT;
3940 /* round to next power of 2 */
3942 while ((max & num_rings) == 0)
3945 if (num_rings != max)
3948 bp->rx_max_ring = max;
3949 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
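/* Hedged sketch, equivalent to the rounding above apart from the
 * MAX_RX_RINGS clamp: the number of BD pages needed for 'size'
 * descriptors is rounded up to the next power of two, since the
 * hardware masks ring indices with a power-of-two ring count.
 */
static u32
bnx2_example_rx_ring_pages(u32 size)
{
	u32 num_rings = 1, max = 1;

	while (size > MAX_RX_DESC_CNT) {
		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	while (max < num_rings)		/* round to next power of 2 */
		max <<= 1;
	return max;
}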
3953 bnx2_free_tx_skbs(struct bnx2 *bp)
3957 if (bp->tx_buf_ring == NULL)
3958 return;
3960 for (i = 0; i < TX_DESC_CNT; ) {
3961 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3962 struct sk_buff *skb = tx_buf->skb;
3970 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3971 skb_headlen(skb), PCI_DMA_TODEVICE);
3975 last = skb_shinfo(skb)->nr_frags;
3976 for (j = 0; j < last; j++) {
3977 tx_buf = &bp->tx_buf_ring[i + j + 1];
3978 pci_unmap_page(bp->pdev,
3979 pci_unmap_addr(tx_buf, mapping),
3980 skb_shinfo(skb)->frags[j].size,
3990 bnx2_free_rx_skbs(struct bnx2 *bp)
3994 if (bp->rx_buf_ring == NULL)
3995 return;
3997 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3998 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3999 struct sk_buff *skb = rx_buf->skb;
4004 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4005 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4014 bnx2_free_skbs(struct bnx2 *bp)
4016 bnx2_free_tx_skbs(bp);
4017 bnx2_free_rx_skbs(bp);
4021 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4025 rc = bnx2_reset_chip(bp, reset_code);
4030 if ((rc = bnx2_init_chip(bp)) != 0)
4033 bnx2_init_tx_ring(bp);
4034 bnx2_init_rx_ring(bp);
4039 bnx2_init_nic(struct bnx2 *bp)
4043 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4046 spin_lock_bh(&bp->phy_lock);
4048 spin_unlock_bh(&bp->phy_lock);
4054 bnx2_test_registers(struct bnx2 *bp)
4058 static const struct {
4061 #define BNX2_FL_NOT_5709 1
4065 { 0x006c, 0, 0x00000000, 0x0000003f },
4066 { 0x0090, 0, 0xffffffff, 0x00000000 },
4067 { 0x0094, 0, 0x00000000, 0x00000000 },
4069 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4070 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4071 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4072 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4073 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4074 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4075 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4076 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4077 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4079 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4080 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4081 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4082 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4083 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4084 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4086 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4087 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4088 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4090 { 0x1000, 0, 0x00000000, 0x00000001 },
4091 { 0x1004, 0, 0x00000000, 0x000f0001 },
4093 { 0x1408, 0, 0x01c00800, 0x00000000 },
4094 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4095 { 0x14a8, 0, 0x00000000, 0x000001ff },
4096 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4097 { 0x14b0, 0, 0x00000002, 0x00000001 },
4098 { 0x14b8, 0, 0x00000000, 0x00000000 },
4099 { 0x14c0, 0, 0x00000000, 0x00000009 },
4100 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4101 { 0x14cc, 0, 0x00000000, 0x00000001 },
4102 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4104 { 0x1800, 0, 0x00000000, 0x00000001 },
4105 { 0x1804, 0, 0x00000000, 0x00000003 },
4107 { 0x2800, 0, 0x00000000, 0x00000001 },
4108 { 0x2804, 0, 0x00000000, 0x00003f01 },
4109 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4110 { 0x2810, 0, 0xffff0000, 0x00000000 },
4111 { 0x2814, 0, 0xffff0000, 0x00000000 },
4112 { 0x2818, 0, 0xffff0000, 0x00000000 },
4113 { 0x281c, 0, 0xffff0000, 0x00000000 },
4114 { 0x2834, 0, 0xffffffff, 0x00000000 },
4115 { 0x2840, 0, 0x00000000, 0xffffffff },
4116 { 0x2844, 0, 0x00000000, 0xffffffff },
4117 { 0x2848, 0, 0xffffffff, 0x00000000 },
4118 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4120 { 0x2c00, 0, 0x00000000, 0x00000011 },
4121 { 0x2c04, 0, 0x00000000, 0x00030007 },
4123 { 0x3c00, 0, 0x00000000, 0x00000001 },
4124 { 0x3c04, 0, 0x00000000, 0x00070000 },
4125 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4126 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4127 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4128 { 0x3c14, 0, 0x00000000, 0xffffffff },
4129 { 0x3c18, 0, 0x00000000, 0xffffffff },
4130 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4131 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4133 { 0x5004, 0, 0x00000000, 0x0000007f },
4134 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4136 { 0x5c00, 0, 0x00000000, 0x00000001 },
4137 { 0x5c04, 0, 0x00000000, 0x0003000f },
4138 { 0x5c08, 0, 0x00000003, 0x00000000 },
4139 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4140 { 0x5c10, 0, 0x00000000, 0xffffffff },
4141 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4142 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4143 { 0x5c88, 0, 0x00000000, 0x00077373 },
4144 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4146 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4147 { 0x680c, 0, 0xffffffff, 0x00000000 },
4148 { 0x6810, 0, 0xffffffff, 0x00000000 },
4149 { 0x6814, 0, 0xffffffff, 0x00000000 },
4150 { 0x6818, 0, 0xffffffff, 0x00000000 },
4151 { 0x681c, 0, 0xffffffff, 0x00000000 },
4152 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4153 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4154 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4155 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4156 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4157 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4158 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4159 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4160 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4161 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4162 { 0x684c, 0, 0xffffffff, 0x00000000 },
4163 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4164 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4165 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4166 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4167 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4168 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4170 { 0xffff, 0, 0x00000000, 0x00000000 },
4175 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4178 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4179 u32 offset, rw_mask, ro_mask, save_val, val;
4180 u16 flags = reg_tbl[i].flags;
4182 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4185 offset = (u32) reg_tbl[i].offset;
4186 rw_mask = reg_tbl[i].rw_mask;
4187 ro_mask = reg_tbl[i].ro_mask;
4189 save_val = readl(bp->regview + offset);
4191 writel(0, bp->regview + offset);
4193 val = readl(bp->regview + offset);
4194 if ((val & rw_mask) != 0) {
4198 if ((val & ro_mask) != (save_val & ro_mask)) {
4202 writel(0xffffffff, bp->regview + offset);
4204 val = readl(bp->regview + offset);
4205 if ((val & rw_mask) != rw_mask) {
4209 if ((val & ro_mask) != (save_val & ro_mask)) {
4213 writel(save_val, bp->regview + offset);
4217 writel(save_val, bp->regview + offset);
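/* Hedged condensation of the walking-bits check above for a single
 * register: writing 0 must clear every read/write bit, writing ~0 must
 * set them all, and the read-only bits must hold their saved value in
 * both cases.  Returns non-zero when the register behaves.
 */
static int
bnx2_example_reg_behaves(void __iomem *reg, u32 rw_mask, u32 ro_mask)
{
	u32 save = readl(reg);
	u32 val;
	int ok;

	writel(0, reg);
	val = readl(reg);
	ok = !(val & rw_mask) && ((val & ro_mask) == (save & ro_mask));

	writel(0xffffffff, reg);
	val = readl(reg);
	ok = ok && ((val & rw_mask) == rw_mask) &&
	     ((val & ro_mask) == (save & ro_mask));

	writel(save, reg);		/* restore the original value */
	return ok;
}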
4225 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4227 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4228 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
4231 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4234 for (offset = 0; offset < size; offset += 4) {
4236 REG_WR_IND(bp, start + offset, test_pattern[i]);
4238 if (REG_RD_IND(bp, start + offset) !=
4248 bnx2_test_memory(struct bnx2 *bp)
4252 static struct mem_entry {
4255 } mem_tbl_5706[] = {
4256 { 0x60000, 0x4000 },
4257 { 0xa0000, 0x3000 },
4258 { 0xe0000, 0x4000 },
4259 { 0x120000, 0x4000 },
4260 { 0x1a0000, 0x4000 },
4261 { 0x160000, 0x4000 },
4265 { 0x60000, 0x4000 },
4266 { 0xa0000, 0x3000 },
4267 { 0xe0000, 0x4000 },
4268 { 0x120000, 0x4000 },
4269 { 0x1a0000, 0x4000 },
4272 struct mem_entry *mem_tbl;
4274 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4275 mem_tbl = mem_tbl_5709;
4276 else
4277 mem_tbl = mem_tbl_5706;
4279 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4280 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4281 mem_tbl[i].len)) != 0) {
4289 #define BNX2_MAC_LOOPBACK 0
4290 #define BNX2_PHY_LOOPBACK 1
4293 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4295 unsigned int pkt_size, num_pkts, i;
4296 struct sk_buff *skb, *rx_skb;
4297 unsigned char *packet;
4298 u16 rx_start_idx, rx_idx;
4301 struct sw_bd *rx_buf;
4302 struct l2_fhdr *rx_hdr;
4305 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4306 bp->loopback = MAC_LOOPBACK;
4307 bnx2_set_mac_loopback(bp);
4309 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4310 bp->loopback = PHY_LOOPBACK;
4311 bnx2_set_phy_loopback(bp);
4317 skb = netdev_alloc_skb(bp->dev, pkt_size);
4320 packet = skb_put(skb, pkt_size);
4321 memcpy(packet, bp->dev->dev_addr, 6);
4322 memset(packet + 6, 0x0, 8);
4323 for (i = 14; i < pkt_size; i++)
4324 packet[i] = (unsigned char) (i & 0xff);
4326 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4327 PCI_DMA_TODEVICE);
4329 REG_WR(bp, BNX2_HC_COMMAND,
4330 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4332 REG_RD(bp, BNX2_HC_COMMAND);
4335 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4339 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4341 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4342 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4343 txbd->tx_bd_mss_nbytes = pkt_size;
4344 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4347 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4348 bp->tx_prod_bseq += pkt_size;
4350 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4351 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4355 REG_WR(bp, BNX2_HC_COMMAND,
4356 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4358 REG_RD(bp, BNX2_HC_COMMAND);
4362 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4365 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4366 goto loopback_test_done;
4369 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4370 if (rx_idx != rx_start_idx + num_pkts) {
4371 goto loopback_test_done;
4374 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4375 rx_skb = rx_buf->skb;
4377 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4378 skb_reserve(rx_skb, bp->rx_offset);
4380 pci_dma_sync_single_for_cpu(bp->pdev,
4381 pci_unmap_addr(rx_buf, mapping),
4382 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4384 if (rx_hdr->l2_fhdr_status &
4385 (L2_FHDR_ERRORS_BAD_CRC |
4386 L2_FHDR_ERRORS_PHY_DECODE |
4387 L2_FHDR_ERRORS_ALIGNMENT |
4388 L2_FHDR_ERRORS_TOO_SHORT |
4389 L2_FHDR_ERRORS_GIANT_FRAME)) {
4391 goto loopback_test_done;
4394 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4395 goto loopback_test_done;
4398 for (i = 14; i < pkt_size; i++) {
4399 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4400 goto loopback_test_done;
4411 #define BNX2_MAC_LOOPBACK_FAILED 1
4412 #define BNX2_PHY_LOOPBACK_FAILED 2
4413 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4414 BNX2_PHY_LOOPBACK_FAILED)
4417 bnx2_test_loopback(struct bnx2 *bp)
4421 if (!netif_running(bp->dev))
4422 return BNX2_LOOPBACK_FAILED;
4424 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4425 spin_lock_bh(&bp->phy_lock);
4427 spin_unlock_bh(&bp->phy_lock);
4428 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4429 rc |= BNX2_MAC_LOOPBACK_FAILED;
4430 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4431 rc |= BNX2_PHY_LOOPBACK_FAILED;
4435 #define NVRAM_SIZE 0x200
4436 #define CRC32_RESIDUAL 0xdebb20e3
4439 bnx2_test_nvram(struct bnx2 *bp)
4441 u32 buf[NVRAM_SIZE / 4];
4442 u8 *data = (u8 *) buf;
4446 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4447 goto test_nvram_done;
4449 magic = be32_to_cpu(buf[0]);
4450 if (magic != 0x669955aa) {
4451 rc = -ENODEV;
4452 goto test_nvram_done;
4455 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4456 goto test_nvram_done;
4458 csum = ether_crc_le(0x100, data);
4459 if (csum != CRC32_RESIDUAL) {
4460 rc = -ENODEV;
4461 goto test_nvram_done;
4464 csum = ether_crc_le(0x100, data + 0x100);
4465 if (csum != CRC32_RESIDUAL) {
4466 rc = -ENODEV;
4467 goto test_nvram_done;
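/* Hedged note with a checkable example: a block whose last four bytes
 * are the little-endian CRC32 of the preceding bytes always CRCs, taken
 * over the whole block, to the residual constant 0xdebb20e3.  Each
 * 0x100-byte NVRAM block tested above is stored that way, so a block
 * self-validates without knowing the CRC's offset:
 */
static int
bnx2_example_nvram_block_valid(const u8 *block)
{
	/* 'block' is assumed to be 0x100 bytes with the CRC included. */
	return ether_crc_le(0x100, block) == CRC32_RESIDUAL;
}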
4474 bnx2_test_link(struct bnx2 *bp)
4478 spin_lock_bh(&bp->phy_lock);
4479 bnx2_enable_bmsr1(bp);
4480 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4481 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4482 bnx2_disable_bmsr1(bp);
4483 spin_unlock_bh(&bp->phy_lock);
4485 if (bmsr & BMSR_LSTATUS) {
4492 bnx2_test_intr(struct bnx2 *bp)
4497 if (!netif_running(bp->dev))
4498 return -ENODEV;
4500 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4502 /* This register is not touched during run-time. */
4503 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4504 REG_RD(bp, BNX2_HC_COMMAND);
4506 for (i = 0; i < 10; i++) {
4507 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4513 msleep_interruptible(10);
4522 bnx2_5706_serdes_timer(struct bnx2 *bp)
4524 spin_lock(&bp->phy_lock);
4525 if (bp->serdes_an_pending)
4526 bp->serdes_an_pending--;
4527 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4530 bp->current_interval = bp->timer_interval;
4532 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4534 if (bmcr & BMCR_ANENABLE) {
4537 bnx2_write_phy(bp, 0x1c, 0x7c00);
4538 bnx2_read_phy(bp, 0x1c, &phy1);
4540 bnx2_write_phy(bp, 0x17, 0x0f01);
4541 bnx2_read_phy(bp, 0x15, &phy2);
4542 bnx2_write_phy(bp, 0x17, 0x0f01);
4543 bnx2_read_phy(bp, 0x15, &phy2);
4545 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4546 !(phy2 & 0x20)) { /* no CONFIG */
4548 bmcr &= ~BMCR_ANENABLE;
4549 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4550 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4551 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4555 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4556 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4559 bnx2_write_phy(bp, 0x17, 0x0f01);
4560 bnx2_read_phy(bp, 0x15, &phy2);
4564 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4565 bmcr |= BMCR_ANENABLE;
4566 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4568 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4571 bp->current_interval = bp->timer_interval;
4573 spin_unlock(&bp->phy_lock);
4577 bnx2_5708_serdes_timer(struct bnx2 *bp)
4579 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4580 bp->serdes_an_pending = 0;
4584 spin_lock(&bp->phy_lock);
4585 if (bp->serdes_an_pending)
4586 bp->serdes_an_pending--;
4587 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4590 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4591 if (bmcr & BMCR_ANENABLE) {
4592 bnx2_enable_forced_2g5(bp);
4593 bp->current_interval = SERDES_FORCED_TIMEOUT;
4595 bnx2_disable_forced_2g5(bp);
4596 bp->serdes_an_pending = 2;
4597 bp->current_interval = bp->timer_interval;
4601 bp->current_interval = bp->timer_interval;
4603 spin_unlock(&bp->phy_lock);
4607 bnx2_timer(unsigned long data)
4609 struct bnx2 *bp = (struct bnx2 *) data;
4612 if (!netif_running(bp->dev))
4613 return;
4615 if (atomic_read(&bp->intr_sem) != 0)
4616 goto bnx2_restart_timer;
4618 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4619 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4621 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4623 if (bp->phy_flags & PHY_SERDES_FLAG) {
4624 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4625 bnx2_5706_serdes_timer(bp);
4626 else
4627 bnx2_5708_serdes_timer(bp);
4631 mod_timer(&bp->timer, jiffies + bp->current_interval);
4635 bnx2_request_irq(struct bnx2 *bp)
4637 struct net_device *dev = bp->dev;
4640 if (bp->flags & USING_MSI_FLAG) {
4641 irq_handler_t fn = bnx2_msi;
4643 if (bp->flags & ONE_SHOT_MSI_FLAG)
4644 fn = bnx2_msi_1shot;
4646 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4648 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4649 IRQF_SHARED, dev->name, dev);
4654 bnx2_free_irq(struct bnx2 *bp)
4656 struct net_device *dev = bp->dev;
4658 if (bp->flags & USING_MSI_FLAG) {
4659 free_irq(bp->pdev->irq, dev);
4660 pci_disable_msi(bp->pdev);
4661 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4663 free_irq(bp->pdev->irq, dev);
4666 /* Called with rtnl_lock */
4668 bnx2_open(struct net_device *dev)
4670 struct bnx2 *bp = netdev_priv(dev);
4673 netif_carrier_off(dev);
4675 bnx2_set_power_state(bp, PCI_D0);
4676 bnx2_disable_int(bp);
4678 rc = bnx2_alloc_mem(bp);
4682 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
4683 if (pci_enable_msi(bp->pdev) == 0) {
4684 bp->flags |= USING_MSI_FLAG;
4685 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4686 bp->flags |= ONE_SHOT_MSI_FLAG;
4689 rc = bnx2_request_irq(bp);
4696 rc = bnx2_init_nic(bp);
4705 mod_timer(&bp->timer, jiffies + bp->current_interval);
4707 atomic_set(&bp->intr_sem, 0);
4709 bnx2_enable_int(bp);
4711 if (bp->flags & USING_MSI_FLAG) {
4712 /* Test MSI to make sure it is working.
4713 * If the MSI test fails, go back to INTx mode.
4715 if (bnx2_test_intr(bp) != 0) {
4716 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4717 " using MSI, switching to INTx mode. Please"
4718 " report this failure to the PCI maintainer"
4719 " and include system chipset information.\n",
4722 bnx2_disable_int(bp);
4725 rc = bnx2_init_nic(bp);
4728 rc = bnx2_request_irq(bp);
4733 del_timer_sync(&bp->timer);
4736 bnx2_enable_int(bp);
4739 if (bp->flags & USING_MSI_FLAG) {
4740 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4743 netif_start_queue(dev);
4749 bnx2_reset_task(struct work_struct *work)
4751 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4753 if (!netif_running(bp->dev))
4756 bp->in_reset_task = 1;
4757 bnx2_netif_stop(bp);
4761 atomic_set(&bp->intr_sem, 1);
4762 bnx2_netif_start(bp);
4763 bp->in_reset_task = 0;
4767 bnx2_tx_timeout(struct net_device *dev)
4769 struct bnx2 *bp = netdev_priv(dev);
4771 /* This allows the netif to be shut down gracefully before resetting */
4772 schedule_work(&bp->reset_task);
4776 /* Called with rtnl_lock */
4778 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4780 struct bnx2 *bp = netdev_priv(dev);
4782 bnx2_netif_stop(bp);
4785 bnx2_set_rx_mode(dev);
4787 bnx2_netif_start(bp);
4791 /* Called with netif_tx_lock.
4792 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4793 * netif_wake_queue().
4796 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4798 struct bnx2 *bp = netdev_priv(dev);
4801 struct sw_bd *tx_buf;
4802 u32 len, vlan_tag_flags, last_frag, mss;
4803 u16 prod, ring_prod;
4806 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4807 netif_stop_queue(dev);
4808 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4811 return NETDEV_TX_BUSY;
4813 len = skb_headlen(skb);
4815 ring_prod = TX_RING_IDX(prod);
4818 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4819 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4822 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4823 vlan_tag_flags |=
4824 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4826 if ((mss = skb_shinfo(skb)->gso_size)) {
4827 u32 tcp_opt_len, ip_tcp_len;
4830 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4832 tcp_opt_len = tcp_optlen(skb);
4834 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4835 u32 tcp_off = skb_transport_offset(skb) -
4836 sizeof(struct ipv6hdr) - ETH_HLEN;
4838 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4839 TX_BD_FLAGS_SW_FLAGS;
4840 if (likely(tcp_off == 0))
4841 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4844 vlan_tag_flags |= ((tcp_off & 0x3) <<
4845 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4846 ((tcp_off & 0x10) <<
4847 TX_BD_FLAGS_TCP6_OFF4_SHL);
4848 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4851 if (skb_header_cloned(skb) &&
4852 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4854 return NETDEV_TX_OK;
4857 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4859 iph = ip_hdr(skb);
4860 iph->check = 0;
4861 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4862 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4866 if (tcp_opt_len || (iph->ihl > 5)) {
4867 vlan_tag_flags |= ((iph->ihl - 5) +
4868 (tcp_opt_len >> 2)) << 8;
4874 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4876 tx_buf = &bp->tx_buf_ring[ring_prod];
4877 tx_buf->skb = skb;
4878 pci_unmap_addr_set(tx_buf, mapping, mapping);
4880 txbd = &bp->tx_desc_ring[ring_prod];
4882 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4883 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4884 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4885 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4887 last_frag = skb_shinfo(skb)->nr_frags;
4889 for (i = 0; i < last_frag; i++) {
4890 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4892 prod = NEXT_TX_BD(prod);
4893 ring_prod = TX_RING_IDX(prod);
4894 txbd = &bp->tx_desc_ring[ring_prod];
4897 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4898 len, PCI_DMA_TODEVICE);
4899 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4902 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4903 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4904 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4905 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4908 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4910 prod = NEXT_TX_BD(prod);
4911 bp->tx_prod_bseq += skb->len;
4913 REG_WR16(bp, bp->tx_bidx_addr, prod);
4914 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4919 dev->trans_start = jiffies;
4921 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4922 netif_stop_queue(dev);
4923 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4924 netif_wake_queue(dev);
4927 return NETDEV_TX_OK;
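/* Hedged sketch of the flow control above: a worst-case skb needs one
 * BD for the linear head plus one per page fragment, so the queue is
 * stopped while fewer than MAX_SKB_FRAGS + 1 BDs remain and re-awakened
 * once bnx2_tx_int() frees more than bp->tx_wake_thresh BDs.  The
 * re-check after netif_stop_queue() closes the race with a concurrent
 * completion.
 */
static inline int
bnx2_example_skb_fits(struct bnx2 *bp, struct sk_buff *skb)
{
	return bnx2_tx_avail(bp) >= skb_shinfo(skb)->nr_frags + 1;
}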
4930 /* Called with rtnl_lock */
4932 bnx2_close(struct net_device *dev)
4934 struct bnx2 *bp = netdev_priv(dev);
4937 /* Calling flush_scheduled_work() may deadlock because
4938 * linkwatch_event() may be on the workqueue and it will try to get
4939 * the rtnl_lock which we are holding.
4941 while (bp->in_reset_task)
4944 bnx2_netif_stop(bp);
4945 del_timer_sync(&bp->timer);
4946 if (bp->flags & NO_WOL_FLAG)
4947 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4948 else if (bp->wol)
4949 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4950 else
4951 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4952 bnx2_reset_chip(bp, reset_code);
4957 netif_carrier_off(bp->dev);
4958 bnx2_set_power_state(bp, PCI_D3hot);
4962 #define GET_NET_STATS64(ctr) \
4963 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4964 (unsigned long) (ctr##_lo)
4966 #define GET_NET_STATS32(ctr) \
4967 (ctr##_lo)
4969 #if (BITS_PER_LONG == 64)
4970 #define GET_NET_STATS GET_NET_STATS64
4972 #define GET_NET_STATS GET_NET_STATS32
4973 #endif
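/* Hedged expansion of the macros above for one counter: the chip keeps
 * 64-bit counters as split 32-bit halves (ctr_hi/ctr_lo).  On a 64-bit
 * kernel both halves are recombined; on 32-bit, GET_NET_STATS32 simply
 * discards the high half.
 */
#if (BITS_PER_LONG == 64)
static inline unsigned long
bnx2_example_rx_octets(struct statistics_block *sb)
{
	return ((unsigned long) sb->stat_IfHCInOctets_hi << 32) +
	       (unsigned long) sb->stat_IfHCInOctets_lo;
}
#endif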
4975 static struct net_device_stats *
4976 bnx2_get_stats(struct net_device *dev)
4978 struct bnx2 *bp = netdev_priv(dev);
4979 struct statistics_block *stats_blk = bp->stats_blk;
4980 struct net_device_stats *net_stats = &bp->net_stats;
4982 if (bp->stats_blk == NULL) {
4983 return net_stats;
4984 }
4985 net_stats->rx_packets =
4986 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4987 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4988 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4990 net_stats->tx_packets =
4991 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4992 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4993 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4995 net_stats->rx_bytes =
4996 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4998 net_stats->tx_bytes =
4999 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5001 net_stats->multicast =
5002 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5004 net_stats->collisions =
5005 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5007 net_stats->rx_length_errors =
5008 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5009 stats_blk->stat_EtherStatsOverrsizePkts);
5011 net_stats->rx_over_errors =
5012 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5014 net_stats->rx_frame_errors =
5015 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5017 net_stats->rx_crc_errors =
5018 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5020 net_stats->rx_errors = net_stats->rx_length_errors +
5021 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5022 net_stats->rx_crc_errors;
5024 net_stats->tx_aborted_errors =
5025 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5026 stats_blk->stat_Dot3StatsLateCollisions);
5028 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5029 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5030 net_stats->tx_carrier_errors = 0;
5032 net_stats->tx_carrier_errors =
5034 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5037 net_stats->tx_errors =
5039 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5041 net_stats->tx_aborted_errors +
5042 net_stats->tx_carrier_errors;
5044 net_stats->rx_missed_errors =
5045 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5046 stats_blk->stat_FwRxDrop);
5051 /* All ethtool functions called with rtnl_lock */
5054 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5056 struct bnx2 *bp = netdev_priv(dev);
5058 cmd->supported = SUPPORTED_Autoneg;
5059 if (bp->phy_flags & PHY_SERDES_FLAG) {
5060 cmd->supported |= SUPPORTED_1000baseT_Full |
5061 SUPPORTED_FIBRE;
5062 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5063 cmd->supported |= SUPPORTED_2500baseX_Full;
5065 cmd->port = PORT_FIBRE;
5068 cmd->supported |= SUPPORTED_10baseT_Half |
5069 SUPPORTED_10baseT_Full |
5070 SUPPORTED_100baseT_Half |
5071 SUPPORTED_100baseT_Full |
5072 SUPPORTED_1000baseT_Full |
5073 SUPPORTED_TP;
5075 cmd->port = PORT_TP;
5078 cmd->advertising = bp->advertising;
5080 if (bp->autoneg & AUTONEG_SPEED) {
5081 cmd->autoneg = AUTONEG_ENABLE;
5084 cmd->autoneg = AUTONEG_DISABLE;
5087 if (netif_carrier_ok(dev)) {
5088 cmd->speed = bp->line_speed;
5089 cmd->duplex = bp->duplex;
5096 cmd->transceiver = XCVR_INTERNAL;
5097 cmd->phy_address = bp->phy_addr;
5103 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5105 struct bnx2 *bp = netdev_priv(dev);
5106 u8 autoneg = bp->autoneg;
5107 u8 req_duplex = bp->req_duplex;
5108 u16 req_line_speed = bp->req_line_speed;
5109 u32 advertising = bp->advertising;
5111 if (cmd->autoneg == AUTONEG_ENABLE) {
5112 autoneg |= AUTONEG_SPEED;
5114 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5116 /* allow advertising 1 speed */
5117 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5118 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5119 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5120 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5122 if (bp->phy_flags & PHY_SERDES_FLAG)
5123 return -EINVAL;
5125 advertising = cmd->advertising;
5127 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5128 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5129 return -EINVAL;
5130 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
5131 advertising = cmd->advertising;
5132 }
5133 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5134 return -EINVAL;
5135 }
5136 else {
5137 if (bp->phy_flags & PHY_SERDES_FLAG) {
5138 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5139 }
5140 else {
5141 advertising = ETHTOOL_ALL_COPPER_SPEED;
5142 }
5143 }
5144 advertising |= ADVERTISED_Autoneg;
5145 }
5146 else {
5147 if (bp->phy_flags & PHY_SERDES_FLAG) {
5148 if ((cmd->speed != SPEED_1000 &&
5149 cmd->speed != SPEED_2500) ||
5150 (cmd->duplex != DUPLEX_FULL))
5151 return -EINVAL;
5153 if (cmd->speed == SPEED_2500 &&
5154 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5155 return -EINVAL;
5156 }
5157 else if (cmd->speed == SPEED_1000) {
5158 return -EINVAL;
5159 }
5160 autoneg &= ~AUTONEG_SPEED;
5161 req_line_speed = cmd->speed;
5162 req_duplex = cmd->duplex;
5163 advertising = 0;
5164 }
5166 bp->autoneg = autoneg;
5167 bp->advertising = advertising;
5168 bp->req_line_speed = req_line_speed;
5169 bp->req_duplex = req_duplex;
5171 spin_lock_bh(&bp->phy_lock);
5173 bnx2_setup_phy(bp);
5175 spin_unlock_bh(&bp->phy_lock);
5177 return 0;
5178 }
5181 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5183 struct bnx2 *bp = netdev_priv(dev);
5185 strcpy(info->driver, DRV_MODULE_NAME);
5186 strcpy(info->version, DRV_MODULE_VERSION);
5187 strcpy(info->bus_info, pci_name(bp->pdev));
5188 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5189 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5190 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5191 info->fw_version[1] = info->fw_version[3] = '.';
5192 info->fw_version[5] = 0;
5193 }
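/*
 * Worked example (sketch): bp->fw_ver packs one version digit per byte,
 * most significant byte first, and the code above turns each byte into
 * an ASCII digit.  A hypothetical fw_ver of 0x01020300 would therefore
 * be rendered as the string "1.2.3".
 */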
5195 #define BNX2_REGDUMP_LEN (32 * 1024)
5198 bnx2_get_regs_len(struct net_device *dev)
5200 return BNX2_REGDUMP_LEN;
5204 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5206 u32 *p = _p, i, offset;
5207 u8 *orig_p = _p;
5208 struct bnx2 *bp = netdev_priv(dev);
5209 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5210 0x0800, 0x0880, 0x0c00, 0x0c10,
5211 0x0c30, 0x0d08, 0x1000, 0x101c,
5212 0x1040, 0x1048, 0x1080, 0x10a4,
5213 0x1400, 0x1490, 0x1498, 0x14f0,
5214 0x1500, 0x155c, 0x1580, 0x15dc,
5215 0x1600, 0x1658, 0x1680, 0x16d8,
5216 0x1800, 0x1820, 0x1840, 0x1854,
5217 0x1880, 0x1894, 0x1900, 0x1984,
5218 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5219 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5220 0x2000, 0x2030, 0x23c0, 0x2400,
5221 0x2800, 0x2820, 0x2830, 0x2850,
5222 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5223 0x3c00, 0x3c94, 0x4000, 0x4010,
5224 0x4080, 0x4090, 0x43c0, 0x4458,
5225 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5226 0x4fc0, 0x5010, 0x53c0, 0x5444,
5227 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5228 0x5fc0, 0x6000, 0x6400, 0x6428,
5229 0x6800, 0x6848, 0x684c, 0x6860,
5230 0x6888, 0x6910, 0x8000 };
5234 memset(p, 0, BNX2_REGDUMP_LEN);
5236 if (!netif_running(bp->dev))
5237 return;
5239 i = 0;
5240 offset = reg_boundaries[0];
5241 p += offset / 4;
5242 while (offset < BNX2_REGDUMP_LEN) {
5243 *p++ = REG_RD(bp, offset);
5244 offset += 4;
5245 if (offset == reg_boundaries[i + 1]) {
5246 offset = reg_boundaries[i + 2];
5247 p = (u32 *) (orig_p + offset);
5248 i += 2;
5249 }
5250 }
5251 }
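/*
 * Layout note (sketch): reg_boundaries[] is a list of (end, next-start)
 * pairs.  The walk above reads [0x0000, 0x0098) one 32-bit word at a
 * time, leaves the hole up to 0x0400 as the zeroes written by the
 * earlier memset, resumes reading at 0x0400, and so on until 0x8000.
 * In isolation the first pair behaves like:
 *
 *	for (offset = 0x0000; offset < 0x0098; offset += 4)
 *		*p++ = REG_RD(bp, offset);
 *	p = (u32 *) (orig_p + 0x0400);	(skip the unimplemented hole)
 */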
5254 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5256 struct bnx2 *bp = netdev_priv(dev);
5258 if (bp->flags & NO_WOL_FLAG) {
5259 wol->supported = 0;
5260 wol->wolopts = 0;
5261 }
5262 else {
5263 wol->supported = WAKE_MAGIC;
5264 if (bp->wol)
5265 wol->wolopts = WAKE_MAGIC;
5266 else
5267 wol->wolopts = 0;
5268 }
5269 memset(&wol->sopass, 0, sizeof(wol->sopass));
5270 }
5273 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5275 struct bnx2 *bp = netdev_priv(dev);
5277 if (wol->wolopts & ~WAKE_MAGIC)
5278 return -EINVAL;
5280 if (wol->wolopts & WAKE_MAGIC) {
5281 if (bp->flags & NO_WOL_FLAG)
5282 return -EINVAL;
5284 bp->wol = 1;
5285 }
5286 else {
5287 bp->wol = 0;
5288 }
5289 return 0;
5290 }
5293 bnx2_nway_reset(struct net_device *dev)
5295 struct bnx2 *bp = netdev_priv(dev);
5296 u32 bmcr;
5298 if (!(bp->autoneg & AUTONEG_SPEED)) {
5299 return -EINVAL;
5300 }
5302 spin_lock_bh(&bp->phy_lock);
5304 /* Force a link down visible on the other side */
5305 if (bp->phy_flags & PHY_SERDES_FLAG) {
5306 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5307 spin_unlock_bh(&bp->phy_lock);
5309 msleep(20);
5311 spin_lock_bh(&bp->phy_lock);
5313 bp->current_interval = SERDES_AN_TIMEOUT;
5314 bp->serdes_an_pending = 1;
5315 mod_timer(&bp->timer, jiffies + bp->current_interval);
5316 }
5318 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5319 bmcr &= ~BMCR_LOOPBACK;
5320 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5322 spin_unlock_bh(&bp->phy_lock);
5324 return 0;
5325 }
5328 bnx2_get_eeprom_len(struct net_device *dev)
5330 struct bnx2 *bp = netdev_priv(dev);
5332 if (bp->flash_info == NULL)
5333 return 0;
5335 return (int) bp->flash_size;
5339 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5340 u8 *eebuf)
5342 struct bnx2 *bp = netdev_priv(dev);
5343 int rc;
5345 /* parameters already validated in ethtool_get_eeprom */
5347 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5349 return rc;
5350 }
5353 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5354 u8 *eebuf)
5356 struct bnx2 *bp = netdev_priv(dev);
5357 int rc;
5359 /* parameters already validated in ethtool_set_eeprom */
5361 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5363 return rc;
5364 }
5367 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5369 struct bnx2 *bp = netdev_priv(dev);
5371 memset(coal, 0, sizeof(struct ethtool_coalesce));
5373 coal->rx_coalesce_usecs = bp->rx_ticks;
5374 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5375 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5376 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5378 coal->tx_coalesce_usecs = bp->tx_ticks;
5379 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5380 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5381 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5383 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5385 return 0;
5386 }
5389 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5391 struct bnx2 *bp = netdev_priv(dev);
5393 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5394 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5396 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5397 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5399 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5400 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5402 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5403 if (bp->rx_quick_cons_trip_int > 0xff)
5404 bp->rx_quick_cons_trip_int = 0xff;
5406 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5407 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5409 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5410 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5412 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5413 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5415 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5416 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5417 0xff;
5419 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5420 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5421 bp->stats_ticks &= 0xffff00;
5423 if (netif_running(bp->dev)) {
5424 bnx2_netif_stop(bp);
5425 bnx2_init_nic(bp);
5426 bnx2_netif_start(bp);
5427 }
5429 return 0;
5430 }
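/*
 * Note (sketch): every knob above is saturated to the width of its
 * hardware field, tick values to 10 bits (0x3ff) and frame counts to
 * 8 bits (0xff).  The same idiom in isolation, with hypothetical names:
 *
 *	u16 ticks = (u16) usecs;
 *	if (ticks > 0x3ff)
 *		ticks = 0x3ff;	(clamp instead of letting it wrap)
 */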
5433 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5435 struct bnx2 *bp = netdev_priv(dev);
5437 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5438 ering->rx_mini_max_pending = 0;
5439 ering->rx_jumbo_max_pending = 0;
5441 ering->rx_pending = bp->rx_ring_size;
5442 ering->rx_mini_pending = 0;
5443 ering->rx_jumbo_pending = 0;
5445 ering->tx_max_pending = MAX_TX_DESC_CNT;
5446 ering->tx_pending = bp->tx_ring_size;
5450 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5452 struct bnx2 *bp = netdev_priv(dev);
5454 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5455 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5456 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5458 return -EINVAL;
5459 }
5460 if (netif_running(bp->dev)) {
5461 bnx2_netif_stop(bp);
5462 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5463 bnx2_free_skbs(bp);
5464 bnx2_free_mem(bp);
5465 }
5467 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5468 bp->tx_ring_size = ering->tx_pending;
5470 if (netif_running(bp->dev)) {
5471 int rc;
5473 rc = bnx2_alloc_mem(bp);
5474 if (rc)
5475 return rc;
5476 bnx2_init_nic(bp);
5477 bnx2_netif_start(bp);
5478 }
5480 return 0;
5481 }
5484 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5486 struct bnx2 *bp = netdev_priv(dev);
5488 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5489 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5490 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5494 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5496 struct bnx2 *bp = netdev_priv(dev);
5498 bp->req_flow_ctrl = 0;
5499 if (epause->rx_pause)
5500 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5501 if (epause->tx_pause)
5502 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5504 if (epause->autoneg) {
5505 bp->autoneg |= AUTONEG_FLOW_CTRL;
5506 }
5507 else {
5508 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5509 }
5511 spin_lock_bh(&bp->phy_lock);
5513 bnx2_setup_phy(bp);
5515 spin_unlock_bh(&bp->phy_lock);
5517 return 0;
5518 }
5521 bnx2_get_rx_csum(struct net_device *dev)
5523 struct bnx2 *bp = netdev_priv(dev);
5525 return bp->rx_csum;
5526 }
5529 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5531 struct bnx2 *bp = netdev_priv(dev);
5533 bp->rx_csum = data;
5534 return 0;
5535 }
5538 bnx2_set_tso(struct net_device *dev, u32 data)
5540 struct bnx2 *bp = netdev_priv(dev);
5542 if (data) {
5543 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5544 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5545 dev->features |= NETIF_F_TSO6;
5546 } else
5547 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5548 NETIF_F_TSO_ECN);
5549 return 0;
5550 }
5552 #define BNX2_NUM_STATS 46
5554 static struct {
5555 char string[ETH_GSTRING_LEN];
5556 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5557 { "rx_bytes" },
5558 { "rx_error_bytes" },
5559 { "tx_bytes" },
5560 { "tx_error_bytes" },
5561 { "rx_ucast_packets" },
5562 { "rx_mcast_packets" },
5563 { "rx_bcast_packets" },
5564 { "tx_ucast_packets" },
5565 { "tx_mcast_packets" },
5566 { "tx_bcast_packets" },
5567 { "tx_mac_errors" },
5568 { "tx_carrier_errors" },
5569 { "rx_crc_errors" },
5570 { "rx_align_errors" },
5571 { "tx_single_collisions" },
5572 { "tx_multi_collisions" },
5574 { "tx_excess_collisions" },
5575 { "tx_late_collisions" },
5576 { "tx_total_collisions" },
5579 { "rx_undersize_packets" },
5580 { "rx_oversize_packets" },
5581 { "rx_64_byte_packets" },
5582 { "rx_65_to_127_byte_packets" },
5583 { "rx_128_to_255_byte_packets" },
5584 { "rx_256_to_511_byte_packets" },
5585 { "rx_512_to_1023_byte_packets" },
5586 { "rx_1024_to_1522_byte_packets" },
5587 { "rx_1523_to_9022_byte_packets" },
5588 { "tx_64_byte_packets" },
5589 { "tx_65_to_127_byte_packets" },
5590 { "tx_128_to_255_byte_packets" },
5591 { "tx_256_to_511_byte_packets" },
5592 { "tx_512_to_1023_byte_packets" },
5593 { "tx_1024_to_1522_byte_packets" },
5594 { "tx_1523_to_9022_byte_packets" },
5595 { "rx_xon_frames" },
5596 { "rx_xoff_frames" },
5597 { "tx_xon_frames" },
5598 { "tx_xoff_frames" },
5599 { "rx_mac_ctrl_frames" },
5600 { "rx_filtered_packets" },
5602 { "rx_fw_discards" },
5605 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
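/*
 * Example (sketch): STATS_OFFSET32 converts a byte offset within struct
 * statistics_block into an index of 32-bit words.  If a field such as
 * stat_IfHCInOctets_hi sat at byte offset 8 (a hypothetical value), the
 * macro would yield 2, i.e. hw_stats[2] would hold that word.
 */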
5607 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5608 STATS_OFFSET32(stat_IfHCInOctets_hi),
5609 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5610 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5611 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5612 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5613 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5614 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5615 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5616 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5617 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5618 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5619 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5620 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5621 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5622 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5623 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5624 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5625 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5626 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5627 STATS_OFFSET32(stat_EtherStatsCollisions),
5628 STATS_OFFSET32(stat_EtherStatsFragments),
5629 STATS_OFFSET32(stat_EtherStatsJabbers),
5630 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5631 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5632 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5633 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5634 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5635 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5636 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5637 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5638 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5639 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5640 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5641 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5642 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5643 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5644 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5645 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5646 STATS_OFFSET32(stat_XonPauseFramesReceived),
5647 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5648 STATS_OFFSET32(stat_OutXonSent),
5649 STATS_OFFSET32(stat_OutXoffSent),
5650 STATS_OFFSET32(stat_MacControlFramesReceived),
5651 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5652 STATS_OFFSET32(stat_IfInMBUFDiscards),
5653 STATS_OFFSET32(stat_FwRxDrop),
5654 };
5656 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5657 * skipped because of errata.
5658 */
5659 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5660 8,0,8,8,8,8,8,8,8,8,
5661 4,0,4,4,4,4,4,4,4,4,
5662 4,4,4,4,4,4,4,4,4,4,
5663 4,4,4,4,4,4,4,4,4,4,
5664 4,4,4,4,4,4,
5665 };
5667 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5668 8,0,8,8,8,8,8,8,8,8,
5669 4,4,4,4,4,4,4,4,4,4,
5670 4,4,4,4,4,4,4,4,4,4,
5671 4,4,4,4,4,4,4,4,4,4,
5672 4,4,4,4,4,4,
5673 };
5675 #define BNX2_NUM_TESTS 6
5677 static struct {
5678 char string[ETH_GSTRING_LEN];
5679 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5680 { "register_test (offline)" },
5681 { "memory_test (offline)" },
5682 { "loopback_test (offline)" },
5683 { "nvram_test (online)" },
5684 { "interrupt_test (online)" },
5685 { "link_test (online)" },
5689 bnx2_self_test_count(struct net_device *dev)
5691 return BNX2_NUM_TESTS;
5695 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5697 struct bnx2 *bp = netdev_priv(dev);
5699 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5700 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5701 int i;
5703 bnx2_netif_stop(bp);
5704 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5705 bnx2_free_skbs(bp);
5707 if (bnx2_test_registers(bp) != 0) {
5708 buf[0] = 1;
5709 etest->flags |= ETH_TEST_FL_FAILED;
5710 }
5711 if (bnx2_test_memory(bp) != 0) {
5712 buf[1] = 1;
5713 etest->flags |= ETH_TEST_FL_FAILED;
5714 }
5715 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5716 etest->flags |= ETH_TEST_FL_FAILED;
5718 if (!netif_running(bp->dev)) {
5719 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5720 }
5721 else {
5722 bnx2_init_nic(bp);
5723 bnx2_netif_start(bp);
5724 }
5726 /* wait for link up */
5727 for (i = 0; i < 7; i++) {
5728 if (bp->link_up)
5729 break;
5730 msleep_interruptible(1000);
5731 }
5732 }
5734 if (bnx2_test_nvram(bp) != 0) {
5735 buf[3] = 1;
5736 etest->flags |= ETH_TEST_FL_FAILED;
5737 }
5738 if (bnx2_test_intr(bp) != 0) {
5739 buf[4] = 1;
5740 etest->flags |= ETH_TEST_FL_FAILED;
5741 }
5743 if (bnx2_test_link(bp) != 0) {
5744 buf[5] = 1;
5745 etest->flags |= ETH_TEST_FL_FAILED;
5746 }
5747 }
5751 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5753 switch (stringset) {
5754 case ETH_SS_STATS:
5755 memcpy(buf, bnx2_stats_str_arr,
5756 sizeof(bnx2_stats_str_arr));
5757 break;
5758 case ETH_SS_TEST:
5759 memcpy(buf, bnx2_tests_str_arr,
5760 sizeof(bnx2_tests_str_arr));
5761 break;
5762 }
5763 }
5766 bnx2_get_stats_count(struct net_device *dev)
5768 return BNX2_NUM_STATS;
5772 bnx2_get_ethtool_stats(struct net_device *dev,
5773 struct ethtool_stats *stats, u64 *buf)
5775 struct bnx2 *bp = netdev_priv(dev);
5776 int i;
5777 u32 *hw_stats = (u32 *) bp->stats_blk;
5778 u8 *stats_len_arr = NULL;
5780 if (hw_stats == NULL) {
5781 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5782 return;
5783 }
5785 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5786 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5787 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5788 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5789 stats_len_arr = bnx2_5706_stats_len_arr;
5790 else
5791 stats_len_arr = bnx2_5708_stats_len_arr;
5793 for (i = 0; i < BNX2_NUM_STATS; i++) {
5794 if (stats_len_arr[i] == 0) {
5795 /* skip this counter */
5796 buf[i] = 0;
5797 continue;
5798 }
5799 if (stats_len_arr[i] == 4) {
5800 /* 4-byte counter */
5801 buf[i] = (u64)
5802 *(hw_stats + bnx2_stats_offset_arr[i]);
5803 continue;
5804 }
5805 /* 8-byte counter */
5806 buf[i] = (((u64) *(hw_stats +
5807 bnx2_stats_offset_arr[i])) << 32) +
5808 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5809 }
5810 }
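/*
 * Sketch: for an 8-byte counter the hi word lives at the array index
 * and the lo word immediately after it, so the 64-bit value is rebuilt
 * along the lines of
 *
 *	u64 val = ((u64) hw[idx] << 32) + hw[idx + 1];
 *
 * where hw and idx stand in for hw_stats and bnx2_stats_offset_arr[i].
 */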
5813 bnx2_phys_id(struct net_device *dev, u32 data)
5815 struct bnx2 *bp = netdev_priv(dev);
5816 int i;
5817 u32 save;
5819 if (data == 0)
5820 data = 2;
5822 save = REG_RD(bp, BNX2_MISC_CFG);
5823 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5825 for (i = 0; i < (data * 2); i++) {
5826 if ((i % 2) == 0) {
5827 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5828 }
5829 else {
5830 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5831 BNX2_EMAC_LED_1000MB_OVERRIDE |
5832 BNX2_EMAC_LED_100MB_OVERRIDE |
5833 BNX2_EMAC_LED_10MB_OVERRIDE |
5834 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5835 BNX2_EMAC_LED_TRAFFIC);
5836 }
5837 msleep_interruptible(500);
5838 if (signal_pending(current))
5839 break;
5840 }
5841 REG_WR(bp, BNX2_EMAC_LED, 0);
5842 REG_WR(bp, BNX2_MISC_CFG, save);
5843 return 0;
5844 }
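/*
 * Usage note (sketch): this is the ethtool .phys_id hook, so a command
 * such as "ethtool -p ethX 5" arrives here with data == 5; the loop
 * above then alternates the LED state every 500 ms for roughly five
 * seconds before restoring the saved BNX2_MISC_CFG value.
 */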
5847 bnx2_set_tx_csum(struct net_device *dev, u32 data)
5849 struct bnx2 *bp = netdev_priv(dev);
5851 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5852 return (ethtool_op_set_tx_hw_csum(dev, data));
5854 return (ethtool_op_set_tx_csum(dev, data));
5857 static const struct ethtool_ops bnx2_ethtool_ops = {
5858 .get_settings = bnx2_get_settings,
5859 .set_settings = bnx2_set_settings,
5860 .get_drvinfo = bnx2_get_drvinfo,
5861 .get_regs_len = bnx2_get_regs_len,
5862 .get_regs = bnx2_get_regs,
5863 .get_wol = bnx2_get_wol,
5864 .set_wol = bnx2_set_wol,
5865 .nway_reset = bnx2_nway_reset,
5866 .get_link = ethtool_op_get_link,
5867 .get_eeprom_len = bnx2_get_eeprom_len,
5868 .get_eeprom = bnx2_get_eeprom,
5869 .set_eeprom = bnx2_set_eeprom,
5870 .get_coalesce = bnx2_get_coalesce,
5871 .set_coalesce = bnx2_set_coalesce,
5872 .get_ringparam = bnx2_get_ringparam,
5873 .set_ringparam = bnx2_set_ringparam,
5874 .get_pauseparam = bnx2_get_pauseparam,
5875 .set_pauseparam = bnx2_set_pauseparam,
5876 .get_rx_csum = bnx2_get_rx_csum,
5877 .set_rx_csum = bnx2_set_rx_csum,
5878 .get_tx_csum = ethtool_op_get_tx_csum,
5879 .set_tx_csum = bnx2_set_tx_csum,
5880 .get_sg = ethtool_op_get_sg,
5881 .set_sg = ethtool_op_set_sg,
5882 .get_tso = ethtool_op_get_tso,
5883 .set_tso = bnx2_set_tso,
5884 .self_test_count = bnx2_self_test_count,
5885 .self_test = bnx2_self_test,
5886 .get_strings = bnx2_get_strings,
5887 .phys_id = bnx2_phys_id,
5888 .get_stats_count = bnx2_get_stats_count,
5889 .get_ethtool_stats = bnx2_get_ethtool_stats,
5890 .get_perm_addr = ethtool_op_get_perm_addr,
5891 };
5893 /* Called with rtnl_lock */
5895 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5897 struct mii_ioctl_data *data = if_mii(ifr);
5898 struct bnx2 *bp = netdev_priv(dev);
5899 int err;
5901 switch(cmd) {
5902 case SIOCGMIIPHY:
5903 data->phy_id = bp->phy_addr;
5905 /* fallthru */
5906 case SIOCGMIIREG: {
5907 u32 mii_regval;
5909 if (!netif_running(dev))
5910 return -EAGAIN;
5912 spin_lock_bh(&bp->phy_lock);
5913 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5914 spin_unlock_bh(&bp->phy_lock);
5916 data->val_out = mii_regval;
5918 return err;
5919 }
5921 case SIOCSMIIREG:
5922 if (!capable(CAP_NET_ADMIN))
5923 return -EPERM;
5925 if (!netif_running(dev))
5926 return -EAGAIN;
5928 spin_lock_bh(&bp->phy_lock);
5929 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5930 spin_unlock_bh(&bp->phy_lock);
5932 return err;
5934 default:
5935 /* do nothing */
5936 break;
5937 }
5938 return -EOPNOTSUPP;
5939 }
5941 /* Called with rtnl_lock */
5943 bnx2_change_mac_addr(struct net_device *dev, void *p)
5945 struct sockaddr *addr = p;
5946 struct bnx2 *bp = netdev_priv(dev);
5948 if (!is_valid_ether_addr(addr->sa_data))
5949 return -EINVAL;
5951 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5952 if (netif_running(dev))
5953 bnx2_set_mac_addr(bp);
5955 return 0;
5956 }
5958 /* Called with rtnl_lock */
5960 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5962 struct bnx2 *bp = netdev_priv(dev);
5964 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5965 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5966 return -EINVAL;
5968 dev->mtu = new_mtu;
5969 if (netif_running(dev)) {
5970 bnx2_netif_stop(bp);
5972 bnx2_init_nic(bp);
5974 bnx2_netif_start(bp);
5975 }
5976 return 0;
5977 }
5979 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5981 poll_bnx2(struct net_device *dev)
5983 struct bnx2 *bp = netdev_priv(dev);
5985 disable_irq(bp->pdev->irq);
5986 bnx2_interrupt(bp->pdev->irq, dev);
5987 enable_irq(bp->pdev->irq);
5988 }
5989 #endif
5991 static void __devinit
5992 bnx2_get_5709_media(struct bnx2 *bp)
5994 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5995 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5996 u32 strap;
5998 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5999 return;
6000 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6001 bp->phy_flags |= PHY_SERDES_FLAG;
6002 return;
6003 }
6005 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6006 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6007 else
6008 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6010 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6011 switch (strap) {
6012 case 0x4:
6013 case 0x5:
6014 case 0x6:
6015 bp->phy_flags |= PHY_SERDES_FLAG;
6016 return;
6017 }
6018 } else {
6019 switch (strap) {
6020 case 0x1:
6021 case 0x2:
6022 case 0x4:
6023 bp->phy_flags |= PHY_SERDES_FLAG;
6024 return;
6025 }
6026 }
6027 }
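/*
 * Decode summary (sketch): the bond ID settles the media outright
 * (C = copper, S = SERDES); otherwise a small strap value, read either
 * from the override bits (>> 21) or the pin straps (>> 8), selects the
 * media per PCI function, e.g. strap 0x5 on function 0 marks the port
 * as SERDES in the switch above.
 */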
6029 static void __devinit
6030 bnx2_get_pci_speed(struct bnx2 *bp)
6032 u32 reg;
6034 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6035 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6036 u32 clkreg;
6038 bp->flags |= PCIX_FLAG;
6040 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6042 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6043 switch (clkreg) {
6044 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6045 bp->bus_speed_mhz = 133;
6046 break;
6048 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6049 bp->bus_speed_mhz = 100;
6050 break;
6052 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6053 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6054 bp->bus_speed_mhz = 66;
6055 break;
6057 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6058 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6059 bp->bus_speed_mhz = 50;
6060 break;
6062 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6063 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6064 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6065 bp->bus_speed_mhz = 33;
6066 break;
6067 }
6068 }
6069 else {
6070 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6071 bp->bus_speed_mhz = 66;
6072 else
6073 bp->bus_speed_mhz = 33;
6074 }
6076 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6077 bp->flags |= PCI_32BIT_FLAG;
6079 }
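/*
 * Example (sketch): a conventional-PCI adapter with M66EN asserted ends
 * up with bus_speed_mhz = 66 and no PCIX_FLAG, which bnx2_bus_string()
 * below would later render as "PCI 64-bit 66MHz" (assuming the 32-bit
 * detect bit is clear).
 */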
6081 static int __devinit
6082 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6084 struct bnx2 *bp;
6085 unsigned long mem_len;
6086 int rc;
6087 u32 reg;
6088 u64 dma_mask, persist_dma_mask;
6090 SET_MODULE_OWNER(dev);
6091 SET_NETDEV_DEV(dev, &pdev->dev);
6092 bp = netdev_priv(dev);
6094 bp->flags = 0;
6095 bp->phy_flags = 0;
6097 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6098 rc = pci_enable_device(pdev);
6100 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6104 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6106 "Cannot find PCI device base address, aborting.\n");
6108 goto err_out_disable;
6111 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6113 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6114 goto err_out_disable;
6117 pci_set_master(pdev);
6119 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6120 if (bp->pm_cap == 0) {
6122 "Cannot find power management capability, aborting.\n");
6124 goto err_out_release;
6127 bp->dev = dev;
6128 bp->pdev = pdev;
6130 spin_lock_init(&bp->phy_lock);
6131 spin_lock_init(&bp->indirect_lock);
6132 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6134 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6135 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6136 dev->mem_end = dev->mem_start + mem_len;
6137 dev->irq = pdev->irq;
6139 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6142 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6144 goto err_out_release;
6147 /* Configure byte swap and enable write to the reg_window registers.
6148 * Rely on CPU to do target byte swapping on big endian systems
6149 * The chip's target access swapping will not swap all accesses
6150 */
6151 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6152 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6153 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6155 bnx2_set_power_state(bp, PCI_D0);
6157 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6159 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6160 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6162 "Cannot find PCIE capability, aborting.\n");
6166 bp->flags |= PCIE_FLAG;
6168 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6169 if (bp->pcix_cap == 0) {
6171 "Cannot find PCIX capability, aborting.\n");
6177 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6178 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6179 bp->flags |= MSI_CAP_FLAG;
6182 /* 5708 cannot support DMA addresses > 40-bit. */
6183 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6184 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6186 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6188 /* Configure DMA attributes. */
6189 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6190 dev->features |= NETIF_F_HIGHDMA;
6191 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6194 "pci_set_consistent_dma_mask failed, aborting.\n");
6197 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6198 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6202 if (!(bp->flags & PCIE_FLAG))
6203 bnx2_get_pci_speed(bp);
6205 /* 5706A0 may falsely detect SERR and PERR. */
6206 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6207 reg = REG_RD(bp, PCI_COMMAND);
6208 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6209 REG_WR(bp, PCI_COMMAND, reg);
6210 }
6211 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6212 !(bp->flags & PCIX_FLAG)) {
6214 dev_err(&pdev->dev,
6215 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6216 goto err_out_unmap;
6217 }
6219 bnx2_init_nvram(bp);
6221 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6223 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6224 BNX2_SHM_HDR_SIGNATURE_SIG) {
6225 u32 off = PCI_FUNC(pdev->devfn) << 2;
6227 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6228 } else
6229 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6231 /* Get the permanent MAC address. First we need to make sure the
6232 * firmware is actually running.
6233 */
6234 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6236 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6237 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6238 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6243 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6245 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6246 bp->mac_addr[0] = (u8) (reg >> 8);
6247 bp->mac_addr[1] = (u8) reg;
6249 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6250 bp->mac_addr[2] = (u8) (reg >> 24);
6251 bp->mac_addr[3] = (u8) (reg >> 16);
6252 bp->mac_addr[4] = (u8) (reg >> 8);
6253 bp->mac_addr[5] = (u8) reg;
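/*
 * Worked example (sketch): with hypothetical shared-memory words
 * MAC_UPPER = 0x00001a2b and MAC_LOWER = 0x3c4d5e6f, the shifts above
 * unpack to the station address 1a:2b:3c:4d:5e:6f; the upper word
 * carries the two high bytes and the lower word the remaining four.
 */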
6255 bp->tx_ring_size = MAX_TX_DESC_CNT;
6256 bnx2_set_rx_ring_size(bp, 255);
6258 bp->rx_csum = 1;
6260 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6262 bp->tx_quick_cons_trip_int = 20;
6263 bp->tx_quick_cons_trip = 20;
6264 bp->tx_ticks_int = 80;
6265 bp->tx_ticks = 80;
6267 bp->rx_quick_cons_trip_int = 6;
6268 bp->rx_quick_cons_trip = 6;
6269 bp->rx_ticks_int = 18;
6270 bp->rx_ticks = 18;
6272 bp->stats_ticks = 1000000 & 0xffff00;
6274 bp->timer_interval = HZ;
6275 bp->current_interval = HZ;
6277 bp->phy_addr = 1;
6279 /* Disable WOL support if we are running on a SERDES chip. */
6280 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6281 bnx2_get_5709_media(bp);
6282 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6283 bp->phy_flags |= PHY_SERDES_FLAG;
6285 if (bp->phy_flags & PHY_SERDES_FLAG) {
6286 bp->flags |= NO_WOL_FLAG;
6287 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6288 bp->phy_addr = 2;
6289 reg = REG_RD_IND(bp, bp->shmem_base +
6290 BNX2_SHARED_HW_CFG_CONFIG);
6291 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6292 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6293 }
6294 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6295 CHIP_NUM(bp) == CHIP_NUM_5708)
6296 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6297 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6298 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6300 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6301 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6302 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6303 bp->flags |= NO_WOL_FLAG;
6305 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6306 bp->tx_quick_cons_trip_int =
6307 bp->tx_quick_cons_trip;
6308 bp->tx_ticks_int = bp->tx_ticks;
6309 bp->rx_quick_cons_trip_int =
6310 bp->rx_quick_cons_trip;
6311 bp->rx_ticks_int = bp->rx_ticks;
6312 bp->comp_prod_trip_int = bp->comp_prod_trip;
6313 bp->com_ticks_int = bp->com_ticks;
6314 bp->cmd_ticks_int = bp->cmd_ticks;
6317 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6319 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6320 * with byte enables disabled on the unused 32-bit word. This is legal
6321 * but causes problems on the AMD 8132 which will eventually stop
6322 * responding after a while.
6324 * AMD believes this incompatibility is unique to the 5706, and
6325 * prefers to locally disable MSI rather than globally disabling it.
6326 */
6327 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6328 struct pci_dev *amd_8132 = NULL;
6330 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6331 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6332 amd_8132))) {
6333 u8 rev;
6335 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6336 if (rev >= 0x10 && rev <= 0x13) {
6337 disable_msi = 1;
6338 pci_dev_put(amd_8132);
6339 break;
6340 }
6341 }
6342 }
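/*
 * Usage note: the same quirk can be forced by hand, since disable_msi
 * is the module parameter declared near the top of this file, e.g.
 * "modprobe bnx2 disable_msi=1".
 */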
6344 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6345 bp->req_line_speed = 0;
6346 if (bp->phy_flags & PHY_SERDES_FLAG) {
6347 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6349 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6350 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6351 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6352 bp->autoneg = 0;
6353 bp->req_line_speed = bp->line_speed = SPEED_1000;
6354 bp->req_duplex = DUPLEX_FULL;
6355 }
6356 }
6357 else {
6358 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6359 }
6361 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6363 init_timer(&bp->timer);
6364 bp->timer.expires = RUN_AT(bp->timer_interval);
6365 bp->timer.data = (unsigned long) bp;
6366 bp->timer.function = bnx2_timer;
6368 return 0;
6370 err_out_unmap:
6371 if (bp->regview) {
6372 iounmap(bp->regview);
6373 bp->regview = NULL;
6374 }
6376 err_out_release:
6377 pci_release_regions(pdev);
6379 err_out_disable:
6380 pci_disable_device(pdev);
6381 pci_set_drvdata(pdev, NULL);
6383 err_out:
6384 return rc;
6385 }
6387 static char * __devinit
6388 bnx2_bus_string(struct bnx2 *bp, char *str)
6390 char *s = str;
6392 if (bp->flags & PCIE_FLAG) {
6393 s += sprintf(s, "PCI Express");
6395 s += sprintf(s, "PCI");
6396 if (bp->flags & PCIX_FLAG)
6397 s += sprintf(s, "-X");
6398 if (bp->flags & PCI_32BIT_FLAG)
6399 s += sprintf(s, " 32-bit");
6401 s += sprintf(s, " 64-bit");
6402 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6407 static int __devinit
6408 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6410 static int version_printed = 0;
6411 struct net_device *dev = NULL;
6412 struct bnx2 *bp;
6413 int rc, i;
6414 char str[40];
6416 if (version_printed++ == 0)
6417 printk(KERN_INFO "%s", version);
6419 /* dev zeroed in init_etherdev */
6420 dev = alloc_etherdev(sizeof(*bp));
6422 if (!dev)
6423 return -ENOMEM;
6425 rc = bnx2_init_board(pdev, dev);
6426 if (rc < 0) {
6427 free_netdev(dev);
6428 return rc;
6429 }
6431 dev->open = bnx2_open;
6432 dev->hard_start_xmit = bnx2_start_xmit;
6433 dev->stop = bnx2_close;
6434 dev->get_stats = bnx2_get_stats;
6435 dev->set_multicast_list = bnx2_set_rx_mode;
6436 dev->do_ioctl = bnx2_ioctl;
6437 dev->set_mac_address = bnx2_change_mac_addr;
6438 dev->change_mtu = bnx2_change_mtu;
6439 dev->tx_timeout = bnx2_tx_timeout;
6440 dev->watchdog_timeo = TX_TIMEOUT;
6441 #ifdef BCM_VLAN
6442 dev->vlan_rx_register = bnx2_vlan_rx_register;
6443 #endif
6444 dev->poll = bnx2_poll;
6445 dev->ethtool_ops = &bnx2_ethtool_ops;
6446 dev->weight = 64;
6448 bp = netdev_priv(dev);
6450 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6451 dev->poll_controller = poll_bnx2;
6454 pci_set_drvdata(pdev, dev);
6456 memcpy(dev->dev_addr, bp->mac_addr, 6);
6457 memcpy(dev->perm_addr, bp->mac_addr, 6);
6458 bp->name = board_info[ent->driver_data].name;
6460 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6461 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6463 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6464 #ifdef BCM_VLAN
6465 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6466 #endif
6467 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6468 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6469 dev->features |= NETIF_F_TSO6;
6471 if ((rc = register_netdev(dev))) {
6472 dev_err(&pdev->dev, "Cannot register net device\n");
6473 if (bp->regview)
6474 iounmap(bp->regview);
6475 pci_release_regions(pdev);
6476 pci_disable_device(pdev);
6477 pci_set_drvdata(pdev, NULL);
6478 free_netdev(dev);
6479 return rc;
6480 }
6482 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6483 "IRQ %d, ",
6484 dev->name,
6485 bp->name,
6486 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6487 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6488 bnx2_bus_string(bp, str),
6489 dev->base_addr,
6490 bp->pdev->irq);
6492 printk("node addr ");
6493 for (i = 0; i < 6; i++)
6494 printk("%2.2x", dev->dev_addr[i]);
6500 static void __devexit
6501 bnx2_remove_one(struct pci_dev *pdev)
6503 struct net_device *dev = pci_get_drvdata(pdev);
6504 struct bnx2 *bp = netdev_priv(dev);
6506 flush_scheduled_work();
6508 unregister_netdev(dev);
6510 if (bp->regview)
6511 iounmap(bp->regview);
6513 free_netdev(dev);
6514 pci_release_regions(pdev);
6515 pci_disable_device(pdev);
6516 pci_set_drvdata(pdev, NULL);
6517 }
6520 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6522 struct net_device *dev = pci_get_drvdata(pdev);
6523 struct bnx2 *bp = netdev_priv(dev);
6524 u32 reset_code;
6526 if (!netif_running(dev))
6527 return 0;
6529 flush_scheduled_work();
6530 bnx2_netif_stop(bp);
6531 netif_device_detach(dev);
6532 del_timer_sync(&bp->timer);
6533 if (bp->flags & NO_WOL_FLAG)
6534 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6535 else if (bp->wol)
6536 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6537 else
6538 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6539 bnx2_reset_chip(bp, reset_code);
6540 bnx2_free_skbs(bp);
6541 pci_save_state(pdev);
6542 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6543 return 0;
6544 }
6547 bnx2_resume(struct pci_dev *pdev)
6549 struct net_device *dev = pci_get_drvdata(pdev);
6550 struct bnx2 *bp = netdev_priv(dev);
6552 if (!netif_running(dev))
6553 return 0;
6555 pci_restore_state(pdev);
6556 bnx2_set_power_state(bp, PCI_D0);
6557 netif_device_attach(dev);
6558 bnx2_init_nic(bp);
6559 bnx2_netif_start(bp);
6560 return 0;
6561 }
6563 static struct pci_driver bnx2_pci_driver = {
6564 .name = DRV_MODULE_NAME,
6565 .id_table = bnx2_pci_tbl,
6566 .probe = bnx2_init_one,
6567 .remove = __devexit_p(bnx2_remove_one),
6568 .suspend = bnx2_suspend,
6569 .resume = bnx2_resume,
6570 };
6572 static int __init bnx2_init(void)
6573 {
6574 return pci_register_driver(&bnx2_pci_driver);
6575 }
6577 static void __exit bnx2_cleanup(void)
6578 {
6579 pci_unregister_driver(&bnx2_pci_driver);
6580 }
6582 module_init(bnx2_init);
6583 module_exit(bnx2_cleanup);