/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.2"
#define DRV_MODULE_RELDATE	"December 13, 2006"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* indexed by board_t, above */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },

static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },

static struct flash_spec flash_table[] =
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
159 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash) */
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
164 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
        /* Atmel Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
        /* The ring uses 256 indices for 255 entries; one of them
         * needs to be skipped.
         */
        diff = bp->tx_prod - bp->tx_cons;
        if (unlikely(diff >= TX_DESC_CNT)) {
                if (diff == TX_DESC_CNT)
                        diff = MAX_TX_DESC_CNT;
        return (bp->tx_ring_size - diff);
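/* Illustrative note (added commentary, not in the original source): with
 * TX_DESC_CNT = 256 and MAX_TX_DESC_CNT = 255, a completely full ring makes
 * diff equal TX_DESC_CNT because the last index of each ring page is skipped
 * by the hardware; remapping that case to MAX_TX_DESC_CNT above keeps the
 * computed availability correct.
 */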
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));

bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);

bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
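/* Note (added commentary): per the branches above, 5709 context memory is
 * written through a request/poll handshake on CTX_CTX_CTRL, while earlier
 * chips use a simple address/data window (CTX_DATA_ADR / CTX_DATA).
 */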
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);
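/* Note (added commentary): when the MAC auto-polls the PHY, both
 * bnx2_read_phy() and bnx2_write_phy() first switch auto-polling off so the
 * software transaction owns the MDIO bus, then restore it once the
 * START_BUSY handshake completes.
 */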
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

bnx2_disable_int(struct bnx2 *bp)
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

bnx2_enable_int(struct bnx2 *bp)
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
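/* Note (added commentary): in bnx2_enable_int() the first INT_ACK_CMD write
 * acknowledges events up to last_status_idx with interrupts still masked,
 * the second unmasks them, and the COAL_NOW command requests an immediate
 * status block update so events that arrived while masked are not lost.
 */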
bnx2_disable_int_sync(struct bnx2 *bp)
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);

bnx2_netif_stop(struct bnx2 *bp)
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies;	/* prevent tx timeout */

bnx2_netif_start(struct bnx2 *bp)
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        netif_poll_enable(bp->dev);
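/* Note (added commentary): intr_sem acts as a nesting count; each
 * bnx2_disable_int_sync()/bnx2_netif_stop() increments it, and only the
 * bnx2_netif_start() call that drops it back to zero restarts the queue and
 * polling.
 */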
bnx2_free_mem(struct bnx2 *bp)
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        kfree(bp->tx_buf_ring);
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        vfree(bp->rx_buf_ring);
        bp->rx_buf_ring = NULL;

bnx2_alloc_mem(struct bnx2 *bp)
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
        if (bp->tx_buf_ring == NULL)

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)

        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
        if (bp->rx_buf_ring == NULL)

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)

        memset(bp->status_blk, 0, bp->status_stats_size);

        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)

bnx2_report_fw_link(struct bnx2 *bp)
        u32 fw_link_status = 0;

                switch (bp->line_speed) {
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;

                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);

bnx2_report_link(struct bnx2 *bp)
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                        printk("half duplex");

                if (bp->flow_ctrl & FLOW_CTRL_RX) {
                        printk(", receive ");
                        if (bp->flow_ctrl & FLOW_CTRL_TX)
                                printk("& transmit ");
                        printk(", transmit ");
                        printk("flow control ON");

                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);

        bnx2_report_fw_link(bp);

bnx2_resolve_flow_ctrl(struct bnx2 *bp)
        u32 local_adv, remote_adv;

        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
            (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;

        if (bp->duplex != DUPLEX_FULL) {

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if (local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
                        bp->flow_ctrl = FLOW_CTRL_TX;
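        /* Added summary of the Table 28B-3 resolution implemented above
         * (local advertisement / link-partner advertisement -> result):
         *   Sym            / Sym            -> TX and RX pause
         *   Sym+Asym       / Asym (no Sym)  -> RX pause only
         *   Asym (no Sym)  / Sym+Asym       -> TX pause only
         * Anything else resolves to no pause, and half duplex never uses
         * pause (the early checks above).
         */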
bnx2_5708s_linkup(struct bnx2 *bp)
        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
        switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
        case BCM5708S_1000X_STAT1_SPEED_10:
                bp->line_speed = SPEED_10;
        case BCM5708S_1000X_STAT1_SPEED_100:
                bp->line_speed = SPEED_100;
        case BCM5708S_1000X_STAT1_SPEED_1G:
                bp->line_speed = SPEED_1000;
        case BCM5708S_1000X_STAT1_SPEED_2G5:
                bp->line_speed = SPEED_2500;

        if (val & BCM5708S_1000X_STAT1_FD)
                bp->duplex = DUPLEX_FULL;
                bp->duplex = DUPLEX_HALF;

bnx2_5706s_linkup(struct bnx2 *bp)
        u32 bmcr, local_adv, remote_adv, common;

        bp->line_speed = SPEED_1000;

        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        if (bmcr & BMCR_FULLDPLX) {
                bp->duplex = DUPLEX_FULL;
                bp->duplex = DUPLEX_HALF;

        if (!(bmcr & BMCR_ANENABLE)) {

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        common = local_adv & remote_adv;
        if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
                if (common & ADVERTISE_1000XFULL) {
                        bp->duplex = DUPLEX_FULL;
                        bp->duplex = DUPLEX_HALF;

bnx2_copper_linkup(struct bnx2 *bp)
        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                common = local_adv & (remote_adv >> 2);
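                /* Note (added commentary): the 1000BASE-T advertisement bits
                 * in MII_CTRL1000 sit two bit positions below the matching
                 * link-partner bits in MII_STAT1000 (e.g. 1000FULL is bit 9
                 * vs. bit 11), hence the >> 2 before masking for common
                 * capabilities.
                 */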
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;

                bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
                bnx2_read_phy(bp, MII_LPA, &remote_adv);

                common = local_adv & remote_adv;
                if (common & ADVERTISE_100FULL) {
                        bp->line_speed = SPEED_100;
                        bp->duplex = DUPLEX_FULL;
                else if (common & ADVERTISE_100HALF) {
                        bp->line_speed = SPEED_100;
                        bp->duplex = DUPLEX_HALF;
                else if (common & ADVERTISE_10FULL) {
                        bp->line_speed = SPEED_10;
                        bp->duplex = DUPLEX_FULL;
                else if (common & ADVERTISE_10HALF) {
                        bp->line_speed = SPEED_10;
                        bp->duplex = DUPLEX_HALF;

                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                        bp->line_speed = SPEED_10;
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                        bp->duplex = DUPLEX_HALF;

bnx2_set_mac_link(struct bnx2 *bp)
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
            (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                 BNX2_EMAC_MODE_25G_MODE);

                switch (bp->line_speed) {
                        if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                val |= BNX2_EMAC_MODE_PORT_MII_10M;
                        val |= BNX2_EMAC_MODE_PORT_MII;
                        val |= BNX2_EMAC_MODE_25G_MODE;
                        val |= BNX2_EMAC_MODE_PORT_GMII;
                val |= BNX2_EMAC_MODE_PORT_GMII;

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

bnx2_set_link(struct bnx2 *bp)
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {

        link_up = bp->link_up;

        bnx2_read_phy(bp, MII_BMSR, &bmsr);
        bnx2_read_phy(bp, MII_BMSR, &bmsr);

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                        bmsr &= ~BMSR_LSTATUS;

        if (bmsr & BMSR_LSTATUS) {
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        bnx2_copper_linkup(bp);
                bnx2_resolve_flow_ctrl(bp);

                if ((bp->phy_flags & PHY_SERDES_FLAG) &&
                    (bp->autoneg & AUTONEG_SPEED)) {
                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
                        bmcr &= ~BCM5708S_BMCR_FORCE_2500;
                        if (!(bmcr & BMCR_ANENABLE)) {
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

        if (bp->link_up != link_up) {
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);

bnx2_reset_phy(struct bnx2 *bp)
        bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
        for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
                bnx2_read_phy(bp, MII_BMCR, &reg);
                if (!(reg & BMCR_RESET)) {

        if (i == PHY_RESET_MAX_WAIT) {

bnx2_phy_get_pause_adv(struct bnx2 *bp)
        if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
            (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        adv = ADVERTISE_1000XPAUSE;
                        adv = ADVERTISE_PAUSE_CAP;
        else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        adv = ADVERTISE_1000XPSE_ASYM;
                        adv = ADVERTISE_PAUSE_ASYM;
        else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
                        adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

bnx2_setup_serdes_phy(struct bnx2 *bp)
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                int force_link_down = 0;

                bnx2_read_phy(bp, MII_ADVERTISE, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, MII_BMCR, &bmcr);
                new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
                new_bmcr |= BMCR_SPEED1000;
                if (bp->req_line_speed == SPEED_2500) {
                        new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                        if (!(up1 & BCM5708S_UP1_2G5)) {
                                up1 |= BCM5708S_UP1_2G5;
                                bnx2_write_phy(bp, BCM5708S_UP1, up1);
                                force_link_down = 1;
                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                        if (up1 & BCM5708S_UP1_2G5) {
                                up1 &= ~BCM5708S_UP1_2G5;
                                bnx2_write_phy(bp, BCM5708S_UP1, up1);
                                force_link_down = 1;

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                                bnx2_write_phy(bp, MII_ADVERTISE, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                               BMCR_ANRESTART | BMCR_ANENABLE);

                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, MII_BMCR, new_bmcr);
                                bnx2_report_link(bp);
                        bnx2_write_phy(bp, MII_ADVERTISE, adv);
                        bnx2_write_phy(bp, MII_BMCR, new_bmcr);

        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
                bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                up1 |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, up1);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, MII_ADVERTISE, &adv);
        bnx2_read_phy(bp, MII_BMCR, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        spin_lock_bh(&bp->phy_lock);

                bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
                bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
                /* Speed up link-up time when the link partner
                 * does not autonegotiate, which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);

#define ETHTOOL_ALL_FIBRE_SPEED						\
        (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
        ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

bnx2_setup_copper_phy(struct bnx2 *bp)
        bnx2_read_phy(bp, MII_BMCR, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                            ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                if ((adv1000_reg != new_adv1000_reg) ||
                    (adv_reg != new_adv_reg) ||
                    ((bmcr & BMCR_ANENABLE) == 0)) {
                        bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);

        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        if (new_bmcr != bmcr) {
                bnx2_read_phy(bp, MII_BMSR, &bmsr);
                bnx2_read_phy(bp, MII_BMSR, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                bnx2_write_phy(bp, MII_BMCR, new_bmcr);
                /* Normally, the new speed is set up after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);

bnx2_setup_phy(struct bnx2 *bp)
        if (bp->loopback == MAC_LOOPBACK)

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                return (bnx2_setup_serdes_phy(bp));
                return (bnx2_setup_copper_phy(bp));

bnx2_init_5708s_phy(struct bnx2 *bp)
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

                is_backplane = REG_RD_IND(bp, bp->shmem_base +
                                          BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);

bnx2_init_5706s_phy(struct bnx2 *bp)
        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);

                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);

bnx2_init_copper_phy(struct bnx2 *bp)
        bp->phy_flags |= PHY_CRC_FIX_FLAG;

        if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);

                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));

bnx2_init_phy(struct bnx2 *bp)
        bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
        bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp);
                rc = bnx2_init_copper_phy(bp);

bnx2_set_mac_loopback(struct bnx2 *bp)
        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~BNX2_EMAC_MODE_PORT;
        mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);

static int bnx2_test_link(struct bnx2 *);

bnx2_set_phy_loopback(struct bnx2 *bp)
        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
        spin_unlock_bh(&bp->phy_lock);

        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);

bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
        msg_data |= bp->fw_wr_seq;

        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

        /* wait for an acknowledgement. */
        for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
                val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))

        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)

bnx2_init_5709_context(struct bnx2 *bp)
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        for (i = 0; i < bp->ctx_pages; i++) {
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                for (j = 0; j < 10; j++) {
                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {

bnx2_init_context(struct bnx2 *bp)
        u32 vcid_addr, pcid_addr, offset;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        vcid_addr = GET_PCID_ADDR(vcid);

                        new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);

                        pcid_addr = GET_PCID_ADDR(new_vcid);
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;

                REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
                REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                /* Zero out the context. */
                for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
                        CTX_WR(bp, 0x00, offset, 0);

                REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

bnx2_alloc_bad_rbuf(struct bnx2 *bp)
        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                printk(KERN_ERR PFX "Failed to allocate memory in "
                                    "bnx2_alloc_bad_rbuf\n");

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;

                val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                val = good_mbuf[good_mbuf_cnt];
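                /* Note (added commentary): the free command below appears to
                 * pack the mbuf cluster index into two fields of the register
                 * (a shifted and an unshifted copy) plus a low valid bit, so
                 * only the known-good indices saved above are returned to the
                 * pool and the bad ones stay allocated forever.
                 */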
                val = (val << 9) | val | 1;

                REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);

bnx2_set_mac_addr(struct bnx2 *bp)
        u8 *mac_addr = bp->dev->dev_addr;

        val = (mac_addr[0] << 8) | mac_addr[1];

        REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

        val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
              (mac_addr[4] << 8) | mac_addr[5];

        REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
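/* Note (added commentary): as assembled above, the station address is split
 * across two MAC_MATCH registers: MATCH0 holds the two most significant
 * bytes and MATCH1 the remaining four.
 */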
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
        struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);

        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        bp->rx_prod_bseq += bp->rx_buf_use_size;

bnx2_phy_int(struct bnx2 *bp)
        u32 new_link_state, old_link_state;

        new_link_state = bp->status_blk->status_attn_bits &
                         STATUS_ATTN_BITS_LINK_STATE;
        old_link_state = bp->status_blk->status_attn_bits_ack &
                         STATUS_ATTN_BITS_LINK_STATE;
        if (new_link_state != old_link_state) {
                if (new_link_state) {
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
                               STATUS_ATTN_BITS_LINK_STATE);
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
                               STATUS_ATTN_BITS_LINK_STATE);
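/* Note (added commentary): a link event is detected by comparing the
 * attention bit with its "ack" copy in the status block; the SET/CLEAR
 * command writes above update the ack copy so each transition is handled
 * only once.
 */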
bnx2_tx_int(struct bnx2 *bp)
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons;

        hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
        if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {

        sw_cons = bp->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &bp->tx_buf_ring[sw_ring_cons];

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);

                last = skb_shinfo(skb)->nr_frags;

                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                skb_shinfo(skb)->frags[i].size,

                sw_cons = NEXT_TX_BD(sw_cons);

                tx_free_bd += last + 1;

                hw_cons = bp->hw_tx_cons =
                        sblk->status_tx_quick_consumer_index0;

                if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {

        bp->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped(). Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */

        if (unlikely(netif_queue_stopped(bp->dev)) &&
            (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
                netif_tx_lock(bp->dev);
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);

bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &bp->rx_buf_ring[cons];
        prod_rx_buf = &bp->rx_buf_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        bp->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

bnx2_rx_int(struct bnx2 *bp, int budget)
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;

        hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {

        sw_cons = bp->rx_cons;
        sw_prod = bp->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        while (sw_cons != hw_cons) {
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len - 4;

                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                /* Since we don't have a jumbo ring, copy small packets */
                if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL)

                        memcpy(new_skb->data,
                               skb->data + bp->rx_offset - 2,

                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);

                else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
                        pci_unmap_single(bp->pdev, dma_addr,
                                bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                        skb_reserve(skb, bp->rx_offset);

                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);

                skb->protocol = eth_type_trans(skb, bp->dev);

                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                    (ntohs(skb->protocol) != 0x8100)) {

                skb->ip_summed = CHECKSUM_NONE;
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                                   L2_FHDR_STATUS_UDP_DATAGRAM))) {
                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;

                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);

                netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;

                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bp->hw_rx_cons =
                                sblk->status_rx_quick_consumer_index0;
                        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)

        bp->rx_cons = sw_cons;
        bp->rx_prod = sw_prod;

        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
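/* Note (added commentary): the two mailbox writes above tell the chip how
 * far the host has replenished the rx ring: HOST_BDIDX is the new producer
 * index and HOST_BSEQ the running byte count accumulated in rx_prod_bseq.
 */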
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
bnx2_msi(int irq, void *dev_instance)
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);

        prefetch(bp->status_blk);
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))

        netif_rx_schedule(dev);

bnx2_interrupt(int irq, void *dev_instance)
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((bp->status_blk->status_idx == bp->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))

        netif_rx_schedule(dev);
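/* Note (added commentary): both handlers only acknowledge/mask the interrupt
 * and hand the real work to bnx2_poll() via netif_rx_schedule(); this is the
 * NAPI pattern, so rx/tx processing runs in softirq context.
 */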
bnx2_has_work(struct bnx2 *bp)
        struct status_block *sblk = bp->status_blk;

        if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
            (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))

        if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=

bnx2_poll(struct net_device *dev, int *budget)
        struct bnx2 *bp = netdev_priv(dev);

        if ((bp->status_blk->status_attn_bits &
             STATUS_ATTN_BITS_LINK_STATE) !=
            (bp->status_blk->status_attn_bits_ack &
             STATUS_ATTN_BITS_LINK_STATE)) {

                spin_lock(&bp->phy_lock);
                spin_unlock(&bp->phy_lock);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);

        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)

        if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
                int orig_budget = *budget;

                if (orig_budget > dev->quota)
                        orig_budget = dev->quota;

                work_done = bnx2_rx_int(bp, orig_budget);
                *budget -= work_done;
                dev->quota -= work_done;

        bp->last_status_idx = bp->status_blk->status_idx;

        if (!bnx2_has_work(bp)) {
                netif_rx_complete(dev);
                if (likely(bp->flags & USING_MSI_FLAG)) {
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bp->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bp->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bp->last_status_idx);

/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
bnx2_set_rx_mode(struct net_device *dev)
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;

        spin_lock_bh(&bp->phy_lock);

        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;

        if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;

        if (!(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        else if (dev->flags & IFF_ALLMULTI) {
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        regidx = (bit & 0xe0) >> 5;

                        mc_filter[regidx] |= (1 << bit);
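                        /* Note (added commentary): "bit" is derived from the
                         * CRC on lines elided above; its high 3 bits select
                         * one of the 8 32-bit hash registers (regidx) and its
                         * low 5 bits select the bit within that register,
                         * giving 256 multicast hash bins in total.
                         */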
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);

        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);

#define FW_BUF_SIZE	0x8000

bnx2_gunzip_init(struct bnx2 *bp)
        if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)

        if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)

        bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
        if (bp->strm->workspace == NULL)

        vfree(bp->gunzip_buf);
        bp->gunzip_buf = NULL;
        printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
                            "decompression.\n", bp->dev->name);
bnx2_gunzip_end(struct bnx2 *bp)
        kfree(bp->strm->workspace);

        if (bp->gunzip_buf) {
                vfree(bp->gunzip_buf);
                bp->gunzip_buf = NULL;

bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
        /* check gzip header */
        if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))

        if (zbuf[3] & FNAME)
                while ((zbuf[n++] != 0) && (n < len));

        bp->strm->next_in = zbuf + n;
        bp->strm->avail_in = len - n;
        bp->strm->next_out = bp->gunzip_buf;
        bp->strm->avail_out = FW_BUF_SIZE;

        rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
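        /* Note (added commentary): the negative window-bits argument puts
         * zlib into raw-deflate mode, which is why the gzip magic bytes and
         * the optional FNAME field are checked and skipped by hand above.
         */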
        rc = zlib_inflate(bp->strm, Z_FINISH);

        *outlen = FW_BUF_SIZE - bp->strm->avail_out;
        *outbuf = bp->gunzip_buf;

        if ((rc != Z_OK) && (rc != Z_STREAM_END))
                printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
                       bp->dev->name, bp->strm->msg);

        zlib_inflateEnd(bp->strm);

        if (rc == Z_STREAM_END)

load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));

                if (rv2p_proc == RV2P_PROC1) {
                        val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
                        val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
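                /* Note (added commentary): each 64-bit RV2P instruction is
                 * staged through the INSTR_HIGH/INSTR_LOW registers (the
                 * rv2p_code pointer advances on lines elided above) and then
                 * committed to instruction word i/8 of the selected processor
                 * via the ADDR_CMD write.
                 */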
        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);

load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
        val = REG_RD_IND(bp, cpu_reg->mode);
        val |= cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->mode, val);
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

        /* Load the Text area. */
        offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);

                rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,

                for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));

        /* Load the Data area. */
        offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);

                for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->data[j]);

        /* Load the SBSS area. */
        offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);

                for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->sbss[j]);

        /* Load the BSS area. */
        offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);

                for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->bss[j]);

        /* Load the Read-Only area. */
        offset = cpu_reg->spad_base +
                 (fw->rodata_addr - cpu_reg->mips_view_base);

                for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->rodata[j]);

        /* Clear the pre-fetch instruction. */
        REG_WR_IND(bp, cpu_reg->inst, 0);
        REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

        /* Start the CPU. */
        val = REG_RD_IND(bp, cpu_reg->mode);
        val &= ~cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
        REG_WR_IND(bp, cpu_reg->mode, val);
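/* Note (added commentary): the start sequence above is: clear pending CPU
 * state, point the program counter at the firmware entry point, then clear
 * the halt bit in the mode register so the on-chip RISC begins executing.
 */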
2385 bnx2_init_cpus(struct bnx2 *bp)
2387 struct cpu_reg cpu_reg;
2393 if ((rc = bnx2_gunzip_init(bp)) != 0)
2396 /* Initialize the RV2P processor. */
2397 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2402 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2404 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2409 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2411 /* Initialize the RX Processor. */
2412 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2413 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2414 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2415 cpu_reg.state = BNX2_RXP_CPU_STATE;
2416 cpu_reg.state_value_clear = 0xffffff;
2417 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2418 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2419 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2420 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2421 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2422 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2423 cpu_reg.mips_view_base = 0x8000000;
2425 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2426 fw = &bnx2_rxp_fw_09;
2428 fw = &bnx2_rxp_fw_06;
2430 rc = load_cpu_fw(bp, &cpu_reg, fw);
2434 /* Initialize the TX Processor. */
2435 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2436 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2437 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2438 cpu_reg.state = BNX2_TXP_CPU_STATE;
2439 cpu_reg.state_value_clear = 0xffffff;
2440 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2441 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2442 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2443 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2444 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2445 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2446 cpu_reg.mips_view_base = 0x8000000;
2448 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2449 fw = &bnx2_txp_fw_09;
2451 fw = &bnx2_txp_fw_06;
2453 rc = load_cpu_fw(bp, &cpu_reg, fw);
2457 /* Initialize the TX Patch-up Processor. */
2458 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2459 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2460 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2461 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2462 cpu_reg.state_value_clear = 0xffffff;
2463 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2464 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2465 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2466 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2467 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2468 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2469 cpu_reg.mips_view_base = 0x8000000;
2471 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2472 fw = &bnx2_tpat_fw_09;
2474 fw = &bnx2_tpat_fw_06;
2476 rc = load_cpu_fw(bp, &cpu_reg, fw);
2480 /* Initialize the Completion Processor. */
2481 cpu_reg.mode = BNX2_COM_CPU_MODE;
2482 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2483 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2484 cpu_reg.state = BNX2_COM_CPU_STATE;
2485 cpu_reg.state_value_clear = 0xffffff;
2486 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2487 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2488 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2489 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2490 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2491 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2492 cpu_reg.mips_view_base = 0x8000000;
2494 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2495 fw = &bnx2_com_fw_09;
2497 fw = &bnx2_com_fw_06;
2499 rc = load_cpu_fw(bp, &cpu_reg, fw);
2503 /* Initialize the Command Processor. */
2504 cpu_reg.mode = BNX2_CP_CPU_MODE;
2505 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2506 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2507 cpu_reg.state = BNX2_CP_CPU_STATE;
2508 cpu_reg.state_value_clear = 0xffffff;
2509 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2510 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2511 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2512 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2513 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2514 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2515 cpu_reg.mips_view_base = 0x8000000;
2517 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2518 fw = &bnx2_cp_fw_09;
2520 rc = load_cpu_fw(bp, &cpu_reg, fw);
2525 bnx2_gunzip_end(bp);
2530 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2534 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2540 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2541 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2542 PCI_PM_CTRL_PME_STATUS);
2544 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2545 /* delay required during transition out of D3hot */
2546 msleep(20);
2548 val = REG_RD(bp, BNX2_EMAC_MODE);
2549 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2550 val &= ~BNX2_EMAC_MODE_MPKT;
2551 REG_WR(bp, BNX2_EMAC_MODE, val);
2553 val = REG_RD(bp, BNX2_RPM_CONFIG);
2554 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2555 REG_WR(bp, BNX2_RPM_CONFIG, val);
2566 autoneg = bp->autoneg;
2567 advertising = bp->advertising;
2569 bp->autoneg = AUTONEG_SPEED;
2570 bp->advertising = ADVERTISED_10baseT_Half |
2571 ADVERTISED_10baseT_Full |
2572 ADVERTISED_100baseT_Half |
2573 ADVERTISED_100baseT_Full |
2574 ADVERTISED_Autoneg;
2576 bnx2_setup_copper_phy(bp);
2578 bp->autoneg = autoneg;
2579 bp->advertising = advertising;
2581 bnx2_set_mac_addr(bp);
2583 val = REG_RD(bp, BNX2_EMAC_MODE);
2585 /* Enable port mode. */
2586 val &= ~BNX2_EMAC_MODE_PORT;
2587 val |= BNX2_EMAC_MODE_PORT_MII |
2588 BNX2_EMAC_MODE_MPKT_RCVD |
2589 BNX2_EMAC_MODE_ACPI_RCVD |
2590 BNX2_EMAC_MODE_MPKT;
2592 REG_WR(bp, BNX2_EMAC_MODE, val);
2594 /* receive all multicast */
2595 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2596 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2597 0xffffffff);
2598 }
2599 REG_WR(bp, BNX2_EMAC_RX_MODE,
2600 BNX2_EMAC_RX_MODE_SORT_MODE);
2602 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2603 BNX2_RPM_SORT_USER0_MC_EN;
2604 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2605 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2606 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2607 BNX2_RPM_SORT_USER0_ENA);
2609 /* Need to enable EMAC and RPM for WOL. */
2610 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2611 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2612 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2613 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2615 val = REG_RD(bp, BNX2_RPM_CONFIG);
2616 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2617 REG_WR(bp, BNX2_RPM_CONFIG, val);
2619 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2620 }
2621 else
2622 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2625 if (!(bp->flags & NO_WOL_FLAG))
2626 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2628 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2629 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2630 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2639 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2641 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2642 pmcsr);
2644 /* No more memory access after this point until
2645 * device is brought back to D0.
2646 */
2657 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2662 /* Request access to the flash interface. */
2663 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2664 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2665 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2666 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2667 break;
2672 if (j >= NVRAM_TIMEOUT_COUNT)
2673 return -EBUSY;
2675 return 0;
2676 }
2679 bnx2_release_nvram_lock(struct bnx2 *bp)
2684 /* Relinquish nvram interface. */
2685 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2687 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2688 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2689 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2690 break;
2695 if (j >= NVRAM_TIMEOUT_COUNT)
2696 return -EBUSY;
2698 return 0;
2699 }
2703 bnx2_enable_nvram_write(struct bnx2 *bp)
2707 val = REG_RD(bp, BNX2_MISC_CFG);
2708 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2710 if (!bp->flash_info->buffered) {
2713 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2714 REG_WR(bp, BNX2_NVM_COMMAND,
2715 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2717 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2720 val = REG_RD(bp, BNX2_NVM_COMMAND);
2721 if (val & BNX2_NVM_COMMAND_DONE)
2722 break;
2725 if (j >= NVRAM_TIMEOUT_COUNT)
2726 return -EBUSY;
2727 }
2728 return 0;
2729 }
2732 bnx2_disable_nvram_write(struct bnx2 *bp)
2736 val = REG_RD(bp, BNX2_MISC_CFG);
2737 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2742 bnx2_enable_nvram_access(struct bnx2 *bp)
2746 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2747 /* Enable both bits, even on read. */
2748 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2749 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2753 bnx2_disable_nvram_access(struct bnx2 *bp)
2757 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2758 /* Disable both bits, even after read. */
2759 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2760 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2761 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2765 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2770 if (bp->flash_info->buffered)
2771 /* Buffered flash, no erase needed */
2774 /* Build an erase command */
2775 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2776 BNX2_NVM_COMMAND_DOIT;
2778 /* Need to clear DONE bit separately. */
2779 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2781 /* Address of the page to erase. */
2782 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2784 /* Issue an erase command. */
2785 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2787 /* Wait for completion. */
2788 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2793 val = REG_RD(bp, BNX2_NVM_COMMAND);
2794 if (val & BNX2_NVM_COMMAND_DONE)
2795 break;
2798 if (j >= NVRAM_TIMEOUT_COUNT)
2799 return -EBUSY;
2801 return 0;
2802 }
2805 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2810 /* Build the command word. */
2811 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2813 /* Calculate an offset of a buffered flash. */
2814 if (bp->flash_info->buffered) {
2815 offset = ((offset / bp->flash_info->page_size) <<
2816 bp->flash_info->page_bits) +
2817 (offset % bp->flash_info->page_size);
2818 }
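/* Buffered (page-addressed) parts take <page number, byte-in-page>
 * rather than a flat address: e.g. with a 264-byte page and
 * page_bits = 9, linear offset 265 becomes (1 << 9) + 1 = 0x201.
 * bnx2_nvram_write_dword() below applies the same translation.
 */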
2820 /* Need to clear DONE bit separately. */
2821 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2823 /* Address of the NVRAM to read from. */
2824 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2826 /* Issue a read command. */
2827 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2829 /* Wait for completion. */
2830 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2835 val = REG_RD(bp, BNX2_NVM_COMMAND);
2836 if (val & BNX2_NVM_COMMAND_DONE) {
2837 val = REG_RD(bp, BNX2_NVM_READ);
2839 val = be32_to_cpu(val);
2840 memcpy(ret_val, &val, 4);
2841 break;
2842 }
2843 }
2844 if (j >= NVRAM_TIMEOUT_COUNT)
2845 return -EBUSY;
2847 return 0;
2848 }
2852 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2857 /* Build the command word. */
2858 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2860 /* Calculate an offset of a buffered flash. */
2861 if (bp->flash_info->buffered) {
2862 offset = ((offset / bp->flash_info->page_size) <<
2863 bp->flash_info->page_bits) +
2864 (offset % bp->flash_info->page_size);
2865 }
2867 /* Need to clear DONE bit separately. */
2868 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2870 memcpy(&val32, val, 4);
2871 val32 = cpu_to_be32(val32);
2873 /* Write the data. */
2874 REG_WR(bp, BNX2_NVM_WRITE, val32);
2876 /* Address of the NVRAM to write to. */
2877 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2879 /* Issue the write command. */
2880 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2882 /* Wait for completion. */
2883 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2886 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2887 break;
2889 if (j >= NVRAM_TIMEOUT_COUNT)
2890 return -EBUSY;
2892 return 0;
2893 }
2896 bnx2_init_nvram(struct bnx2 *bp)
2899 int j, entry_count, rc;
2900 struct flash_spec *flash;
2902 /* Determine the selected interface. */
2903 val = REG_RD(bp, BNX2_NVM_CFG1);
2905 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2908 if (val & 0x40000000) {
2910 /* Flash interface has been reconfigured */
2911 for (j = 0, flash = &flash_table[0]; j < entry_count;
2913 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2914 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2915 bp->flash_info = flash;
2922 /* Not yet reconfigured */
2924 if (val & (1 << 23))
2925 mask = FLASH_BACKUP_STRAP_MASK;
2927 mask = FLASH_STRAP_MASK;
2929 for (j = 0, flash = &flash_table[0]; j < entry_count;
2932 if ((val & mask) == (flash->strapping & mask)) {
2933 bp->flash_info = flash;
2935 /* Request access to the flash interface. */
2936 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2937 return rc;
2939 /* Enable access to flash interface */
2940 bnx2_enable_nvram_access(bp);
2942 /* Reconfigure the flash interface */
2943 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2944 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2945 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2946 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2948 /* Disable access to flash interface */
2949 bnx2_disable_nvram_access(bp);
2950 bnx2_release_nvram_lock(bp);
2955 } /* if (val & 0x40000000) */
2957 if (j == entry_count) {
2958 bp->flash_info = NULL;
2959 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2960 return -ENODEV;
2961 }
2963 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2964 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2965 if (val)
2966 bp->flash_size = val;
2967 else
2968 bp->flash_size = bp->flash_info->total_size;
2970 return rc;
2971 }
2974 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2978 u32 cmd_flags, offset32, len32, extra;
2983 /* Request access to the flash interface. */
2984 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2985 return rc;
2987 /* Enable access to flash interface */
2988 bnx2_enable_nvram_access(bp);
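/* Reads are done in whole dwords bracketed by FIRST/LAST command flags:
 * an unaligned head dword is read with FIRST|LAST and only the wanted
 * bytes copied out, aligned middle dwords stream with no flags, and a
 * ragged tail dword is read with LAST and trimmed to size.
 */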
3001 pre_len = 4 - (offset & 3);
3003 if (pre_len >= len32) {
3004 pre_len = len32;
3005 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3006 BNX2_NVM_COMMAND_LAST;
3007 }
3008 else
3009 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3012 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3017 memcpy(ret_buf, buf + (offset & 3), pre_len);
3024 extra = 4 - (len32 & 3);
3025 len32 = (len32 + 4) & ~3;
3032 cmd_flags = BNX2_NVM_COMMAND_LAST;
3033 else
3034 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3035 BNX2_NVM_COMMAND_LAST;
3037 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3039 memcpy(ret_buf, buf, 4 - extra);
3041 else if (len32 > 0) {
3044 /* Read the first word. */
3048 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3050 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3052 /* Advance to the next dword. */
3057 while (len32 > 4 && rc == 0) {
3058 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3060 /* Advance to the next dword. */
3069 cmd_flags = BNX2_NVM_COMMAND_LAST;
3070 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3072 memcpy(ret_buf, buf, 4 - extra);
3075 /* Disable access to flash interface */
3076 bnx2_disable_nvram_access(bp);
3078 bnx2_release_nvram_lock(bp);
3080 return rc;
3081 }
3084 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3087 u32 written, offset32, len32;
3088 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3090 int align_start, align_end;
3095 align_start = align_end = 0;
3097 if ((align_start = (offset32 & 3))) {
3098 offset32 &= ~3;
3099 len32 += align_start;
3100 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3105 if ((len32 > 4) || !align_start) {
3106 align_end = 4 - (len32 & 3);
3108 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3115 if (align_start || align_end) {
3116 buf = kmalloc(len32, GFP_KERNEL);
3117 if (buf == NULL)
3118 return -ENOMEM;
3119 if (align_start)
3120 memcpy(buf, start, 4);
3122 if (align_end)
3123 memcpy(buf + len32 - 4, end, 4);
3125 memcpy(buf + align_start, data_buf, buf_size);
3128 if (bp->flash_info->buffered == 0) {
3129 flash_buffer = kmalloc(264, GFP_KERNEL);
3130 if (flash_buffer == NULL) {
3131 rc = -ENOMEM;
3132 goto nvram_write_end;
3133 }
3134 }
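/* Non-buffered flash can only be programmed a full page at a time, so
 * each pass of the loop below is a read-modify-write: read the whole
 * page into flash_buffer, erase it, then write back the old bytes
 * before data_start, the new bytes up to data_end, and the old bytes
 * after.  Buffered parts skip the page read and the erase.
 */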
3137 while ((written < len32) && (rc == 0)) {
3138 u32 page_start, page_end, data_start, data_end;
3139 u32 addr, cmd_flags;
3142 /* Find the page_start addr */
3143 page_start = offset32 + written;
3144 page_start -= (page_start % bp->flash_info->page_size);
3145 /* Find the page_end addr */
3146 page_end = page_start + bp->flash_info->page_size;
3147 /* Find the data_start addr */
3148 data_start = (written == 0) ? offset32 : page_start;
3149 /* Find the data_end addr */
3150 data_end = (page_end > offset32 + len32) ?
3151 (offset32 + len32) : page_end;
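/* For illustration (hypothetical 256-byte page): writing 0x210 bytes
 * at offset 0x104 gives, on the first pass, page_start = 0x100,
 * page_end = 0x200, data_start = 0x104 and data_end = 0x200; later
 * passes then start on page boundaries.
 */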
3153 /* Request access to the flash interface. */
3154 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3155 goto nvram_write_end;
3157 /* Enable access to flash interface */
3158 bnx2_enable_nvram_access(bp);
3160 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3161 if (bp->flash_info->buffered == 0) {
3164 /* Read the whole page into the buffer
3165 * (non-buffer flash only) */
3166 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3167 if (j == (bp->flash_info->page_size - 4)) {
3168 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3169 }
3170 rc = bnx2_nvram_read_dword(bp,
3171 page_start + j,
3172 &flash_buffer[j],
3173 cmd_flags);
3175 if (rc)
3176 goto nvram_write_end;
3178 cmd_flags = 0;
3179 }
3180 }
3182 /* Enable writes to flash interface (unlock write-protect) */
3183 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3184 goto nvram_write_end;
3186 /* Erase the page */
3187 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3188 goto nvram_write_end;
3190 /* Re-enable the write again for the actual write */
3191 bnx2_enable_nvram_write(bp);
3193 /* Loop to write back the buffer data from page_start to
3196 if (bp->flash_info->buffered == 0) {
3197 for (addr = page_start; addr < data_start;
3198 addr += 4, i += 4) {
3200 rc = bnx2_nvram_write_dword(bp, addr,
3201 &flash_buffer[i], cmd_flags);
3203 if (rc != 0)
3204 goto nvram_write_end;
3206 cmd_flags = 0;
3207 }
3208 }
3210 /* Loop to write the new data from data_start to data_end */
3211 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3212 if ((addr == page_end - 4) ||
3213 ((bp->flash_info->buffered) &&
3214 (addr == data_end - 4))) {
3216 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3218 rc = bnx2_nvram_write_dword(bp, addr, buf,
3219 cmd_flags);
3221 if (rc != 0)
3222 goto nvram_write_end;
3224 cmd_flags = 0;
3225 buf += 4;
3226 }
3228 /* Loop to write back the buffer data from data_end
3230 if (bp->flash_info->buffered == 0) {
3231 for (addr = data_end; addr < page_end;
3232 addr += 4, i += 4) {
3234 if (addr == page_end - 4) {
3235 cmd_flags = BNX2_NVM_COMMAND_LAST;
3236 }
3237 rc = bnx2_nvram_write_dword(bp, addr,
3238 &flash_buffer[i], cmd_flags);
3240 if (rc != 0)
3241 goto nvram_write_end;
3243 cmd_flags = 0;
3244 }
3245 }
3247 /* Disable writes to flash interface (lock write-protect) */
3248 bnx2_disable_nvram_write(bp);
3250 /* Disable access to flash interface */
3251 bnx2_disable_nvram_access(bp);
3252 bnx2_release_nvram_lock(bp);
3254 /* Increment written */
3255 written += data_end - data_start;
3256 }
3258 nvram_write_end:
3259 if (bp->flash_info->buffered == 0)
3260 kfree(flash_buffer);
3262 if (align_start || align_end)
3263 kfree(buf);
3265 return rc;
3266 }
3268 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3273 /* Wait for the current PCI transaction to complete before
3274 * issuing a reset. */
3275 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3276 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3277 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3278 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3279 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3280 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3283 /* Wait for the firmware to tell us it is ok to issue a reset. */
3284 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3286 /* Deposit a driver reset signature so the firmware knows that
3287 * this is a soft reset. */
3288 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3289 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3291 /* Do a dummy read to force the chip to complete all current
3292 * transactions before we issue a reset. */
3293 val = REG_RD(bp, BNX2_MISC_ID);
3295 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3296 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3297 REG_RD(bp, BNX2_MISC_COMMAND);
3300 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3301 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3303 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3305 } else {
3306 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3307 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3308 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3311 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3313 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3314 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3315 current->state = TASK_UNINTERRUPTIBLE;
3316 schedule_timeout(HZ / 50);
3319 /* Reset takes approximately 30 usec */
3320 for (i = 0; i < 10; i++) {
3321 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3322 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3323 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3328 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3329 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3330 printk(KERN_ERR PFX "Chip reset did not complete\n");
3335 /* Make sure byte swapping is properly configured. */
3336 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3337 if (val != 0x01020304) {
3338 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3342 /* Wait for the firmware to finish its initialization. */
3343 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3347 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3348 /* Adjust the voltage regulator to two steps lower. The default
3349 * of this register is 0x0000000e. */
3350 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3352 /* Remove bad rbuf memory from the free pool. */
3353 rc = bnx2_alloc_bad_rbuf(bp);
3360 bnx2_init_chip(struct bnx2 *bp)
3365 /* Make sure the interrupt is not active. */
3366 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3368 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3369 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3371 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3373 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3374 DMA_READ_CHANS << 12 |
3375 DMA_WRITE_CHANS << 16;
3377 val |= (0x2 << 20) | (1 << 11);
3379 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3380 val |= (1 << 23);
3382 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3383 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3384 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3386 REG_WR(bp, BNX2_DMA_CONFIG, val);
3388 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3389 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3390 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3391 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3394 if (bp->flags & PCIX_FLAG) {
3397 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3399 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3400 val16 & ~PCI_X_CMD_ERO);
3403 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3404 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3405 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3406 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3408 /* Initialize context mapping and zero out the quick contexts. The
3409 * context block must have already been enabled. */
3410 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3411 bnx2_init_5709_context(bp);
3413 bnx2_init_context(bp);
3415 if ((rc = bnx2_init_cpus(bp)) != 0)
3416 return rc;
3418 bnx2_init_nvram(bp);
3420 bnx2_set_mac_addr(bp);
3422 val = REG_RD(bp, BNX2_MQ_CONFIG);
3423 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3424 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3425 REG_WR(bp, BNX2_MQ_CONFIG, val);
3427 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3428 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3429 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3431 val = (BCM_PAGE_BITS - 8) << 24;
3432 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3434 /* Configure page size. */
3435 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3436 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3437 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3438 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3440 val = bp->mac_addr[0] +
3441 (bp->mac_addr[1] << 8) +
3442 (bp->mac_addr[2] << 16) +
3443 bp->mac_addr[3] +
3444 (bp->mac_addr[4] << 8) +
3445 (bp->mac_addr[5] << 16);
3446 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
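/* Seeding the backoff generator from the MAC address makes stations
 * on the same segment pick different collision backoff slots.
 */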
3448 /* Program the MTU. Also include 4 bytes for CRC32. */
3449 val = bp->dev->mtu + ETH_HLEN + 4;
3450 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3451 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3452 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
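/* Example: a 1500-byte MTU programs 1500 + 14 + 4 = 1518; anything
 * above the standard maximum frame size also sets the JUMBO enable bit.
 */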
3454 bp->last_status_idx = 0;
3455 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3457 /* Set up how to generate a link change interrupt. */
3458 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3460 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3461 (u64) bp->status_blk_mapping & 0xffffffff);
3462 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3464 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3465 (u64) bp->stats_blk_mapping & 0xffffffff);
3466 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3467 (u64) bp->stats_blk_mapping >> 32);
3469 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3470 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3472 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3473 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3475 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3476 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3478 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3480 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3482 REG_WR(bp, BNX2_HC_COM_TICKS,
3483 (bp->com_ticks_int << 16) | bp->com_ticks);
3485 REG_WR(bp, BNX2_HC_CMD_TICKS,
3486 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3488 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3489 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3491 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3492 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3493 else
3494 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3495 BNX2_HC_CONFIG_TX_TMR_MODE |
3496 BNX2_HC_CONFIG_COLLECT_STATS);
3499 /* Clear internal stats counters. */
3500 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3502 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3504 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3505 BNX2_PORT_FEATURE_ASF_ENABLED)
3506 bp->flags |= ASF_ENABLE_FLAG;
3508 /* Initialize the receive filter. */
3509 bnx2_set_rx_mode(bp->dev);
3511 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3512 0);
3514 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3515 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3519 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3525 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3527 u32 val, offset0, offset1, offset2, offset3;
3529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3530 offset0 = BNX2_L2CTX_TYPE_XI;
3531 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3532 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3533 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3535 offset0 = BNX2_L2CTX_TYPE;
3536 offset1 = BNX2_L2CTX_CMD_TYPE;
3537 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3538 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3540 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3541 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3543 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3544 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3546 val = (u64) bp->tx_desc_mapping >> 32;
3547 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3549 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3550 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3554 bnx2_init_tx_ring(struct bnx2 *bp)
3559 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3561 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3563 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3564 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3569 bp->tx_prod_bseq = 0;
3572 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3573 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3575 bnx2_init_tx_context(bp, cid);
3579 bnx2_init_rx_ring(struct bnx2 *bp)
3583 u16 prod, ring_prod;
3586 /* 8 for CRC and VLAN */
3587 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3589 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
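/* rx_buf_use_size is what the chip may DMA (MTU + Ethernet header +
 * l2_fhdr offset + 4 bytes CRC + 4 bytes VLAN tag); rx_buf_size adds
 * slack so the data area can be aligned to BNX2_RX_ALIGN.
 */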
3591 ring_prod = prod = bp->rx_prod = 0;
3594 bp->rx_prod_bseq = 0;
3596 for (i = 0; i < bp->rx_max_ring; i++) {
3599 rxbd = &bp->rx_desc_ring[i][0];
3600 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3601 rxbd->rx_bd_len = bp->rx_buf_use_size;
3602 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3603 }
3604 if (i == (bp->rx_max_ring - 1))
3605 j = 0;
3606 else
3607 j = i + 1;
3608 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3609 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3613 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3614 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3616 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3618 val = (u64) bp->rx_desc_mapping[0] >> 32;
3619 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3621 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3622 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3624 for (i = 0; i < bp->rx_ring_size; i++) {
3625 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3626 break;
3627 }
3628 prod = NEXT_RX_BD(prod);
3629 ring_prod = RX_RING_IDX(prod);
3633 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3635 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3639 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3643 bp->rx_ring_size = size;
3645 while (size > MAX_RX_DESC_CNT) {
3646 size -= MAX_RX_DESC_CNT;
3647 num_rings++;
3648 }
3649 /* round to next power of 2 */
3650 max = MAX_RX_RINGS;
3651 while ((max & num_rings) == 0)
3652 max >>= 1;
3654 if (num_rings != max)
3655 max <<= 1;
3657 bp->rx_max_ring = max;
3658 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
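/* The page count is rounded up to a power of 2 so that ring index
 * arithmetic (RX_RING_IDX) can use masking instead of division.
 */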
3662 bnx2_free_tx_skbs(struct bnx2 *bp)
3666 if (bp->tx_buf_ring == NULL)
3667 return;
3669 for (i = 0; i < TX_DESC_CNT; ) {
3670 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3671 struct sk_buff *skb = tx_buf->skb;
3672 int j, last;
3674 if (skb == NULL) {
3675 i++;
3676 continue;
3677 }
3679 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3680 skb_headlen(skb), PCI_DMA_TODEVICE);
3684 last = skb_shinfo(skb)->nr_frags;
3685 for (j = 0; j < last; j++) {
3686 tx_buf = &bp->tx_buf_ring[i + j + 1];
3687 pci_unmap_page(bp->pdev,
3688 pci_unmap_addr(tx_buf, mapping),
3689 skb_shinfo(skb)->frags[j].size,
3690 PCI_DMA_TODEVICE);
3691 }
3692 dev_kfree_skb(skb);
3693 i += j + 1;
3694 }
3695 }
3699 bnx2_free_rx_skbs(struct bnx2 *bp)
3703 if (bp->rx_buf_ring == NULL)
3704 return;
3706 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3707 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3708 struct sk_buff *skb = rx_buf->skb;
3710 if (skb == NULL)
3711 continue;
3713 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3714 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3716 rx_buf->skb = NULL;
3718 dev_kfree_skb(skb);
3719 }
3720 }
3723 bnx2_free_skbs(struct bnx2 *bp)
3725 bnx2_free_tx_skbs(bp);
3726 bnx2_free_rx_skbs(bp);
3730 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3734 rc = bnx2_reset_chip(bp, reset_code);
3735 if (rc)
3736 return rc;
3739 if ((rc = bnx2_init_chip(bp)) != 0)
3740 return rc;
3742 bnx2_init_tx_ring(bp);
3743 bnx2_init_rx_ring(bp);
3745 return 0;
3746 }
3748 bnx2_init_nic(struct bnx2 *bp)
3752 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3755 spin_lock_bh(&bp->phy_lock);
3757 spin_unlock_bh(&bp->phy_lock);
3763 bnx2_test_registers(struct bnx2 *bp)
3767 static const struct {
3773 { 0x006c, 0, 0x00000000, 0x0000003f },
3774 { 0x0090, 0, 0xffffffff, 0x00000000 },
3775 { 0x0094, 0, 0x00000000, 0x00000000 },
3777 { 0x0404, 0, 0x00003f00, 0x00000000 },
3778 { 0x0418, 0, 0x00000000, 0xffffffff },
3779 { 0x041c, 0, 0x00000000, 0xffffffff },
3780 { 0x0420, 0, 0x00000000, 0x80ffffff },
3781 { 0x0424, 0, 0x00000000, 0x00000000 },
3782 { 0x0428, 0, 0x00000000, 0x00000001 },
3783 { 0x0450, 0, 0x00000000, 0x0000ffff },
3784 { 0x0454, 0, 0x00000000, 0xffffffff },
3785 { 0x0458, 0, 0x00000000, 0xffffffff },
3787 { 0x0808, 0, 0x00000000, 0xffffffff },
3788 { 0x0854, 0, 0x00000000, 0xffffffff },
3789 { 0x0868, 0, 0x00000000, 0x77777777 },
3790 { 0x086c, 0, 0x00000000, 0x77777777 },
3791 { 0x0870, 0, 0x00000000, 0x77777777 },
3792 { 0x0874, 0, 0x00000000, 0x77777777 },
3794 { 0x0c00, 0, 0x00000000, 0x00000001 },
3795 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3796 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3798 { 0x1000, 0, 0x00000000, 0x00000001 },
3799 { 0x1004, 0, 0x00000000, 0x000f0001 },
3801 { 0x1408, 0, 0x01c00800, 0x00000000 },
3802 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3803 { 0x14a8, 0, 0x00000000, 0x000001ff },
3804 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3805 { 0x14b0, 0, 0x00000002, 0x00000001 },
3806 { 0x14b8, 0, 0x00000000, 0x00000000 },
3807 { 0x14c0, 0, 0x00000000, 0x00000009 },
3808 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3809 { 0x14cc, 0, 0x00000000, 0x00000001 },
3810 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3812 { 0x1800, 0, 0x00000000, 0x00000001 },
3813 { 0x1804, 0, 0x00000000, 0x00000003 },
3815 { 0x2800, 0, 0x00000000, 0x00000001 },
3816 { 0x2804, 0, 0x00000000, 0x00003f01 },
3817 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3818 { 0x2810, 0, 0xffff0000, 0x00000000 },
3819 { 0x2814, 0, 0xffff0000, 0x00000000 },
3820 { 0x2818, 0, 0xffff0000, 0x00000000 },
3821 { 0x281c, 0, 0xffff0000, 0x00000000 },
3822 { 0x2834, 0, 0xffffffff, 0x00000000 },
3823 { 0x2840, 0, 0x00000000, 0xffffffff },
3824 { 0x2844, 0, 0x00000000, 0xffffffff },
3825 { 0x2848, 0, 0xffffffff, 0x00000000 },
3826 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3828 { 0x2c00, 0, 0x00000000, 0x00000011 },
3829 { 0x2c04, 0, 0x00000000, 0x00030007 },
3831 { 0x3c00, 0, 0x00000000, 0x00000001 },
3832 { 0x3c04, 0, 0x00000000, 0x00070000 },
3833 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3834 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3835 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3836 { 0x3c14, 0, 0x00000000, 0xffffffff },
3837 { 0x3c18, 0, 0x00000000, 0xffffffff },
3838 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3839 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3841 { 0x5004, 0, 0x00000000, 0x0000007f },
3842 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3843 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3845 { 0x5c00, 0, 0x00000000, 0x00000001 },
3846 { 0x5c04, 0, 0x00000000, 0x0003000f },
3847 { 0x5c08, 0, 0x00000003, 0x00000000 },
3848 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3849 { 0x5c10, 0, 0x00000000, 0xffffffff },
3850 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3851 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3852 { 0x5c88, 0, 0x00000000, 0x00077373 },
3853 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3855 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3856 { 0x680c, 0, 0xffffffff, 0x00000000 },
3857 { 0x6810, 0, 0xffffffff, 0x00000000 },
3858 { 0x6814, 0, 0xffffffff, 0x00000000 },
3859 { 0x6818, 0, 0xffffffff, 0x00000000 },
3860 { 0x681c, 0, 0xffffffff, 0x00000000 },
3861 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3862 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3863 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3864 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3865 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3866 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3867 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3868 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3869 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3870 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3871 { 0x684c, 0, 0xffffffff, 0x00000000 },
3872 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3873 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3874 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3875 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3876 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3877 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3879 { 0xffff, 0, 0x00000000, 0x00000000 },
3880 };
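/* Each entry gives a register offset plus masks of its read/write and
 * read-only bits.  The loop below writes 0 and then all-ones to each
 * register: read/write bits must follow the written value, read-only
 * bits must keep their saved value, and the original contents are
 * restored afterwards.
 */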
3883 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3884 u32 offset, rw_mask, ro_mask, save_val, val;
3886 offset = (u32) reg_tbl[i].offset;
3887 rw_mask = reg_tbl[i].rw_mask;
3888 ro_mask = reg_tbl[i].ro_mask;
3890 save_val = readl(bp->regview + offset);
3892 writel(0, bp->regview + offset);
3894 val = readl(bp->regview + offset);
3895 if ((val & rw_mask) != 0) {
3899 if ((val & ro_mask) != (save_val & ro_mask)) {
3903 writel(0xffffffff, bp->regview + offset);
3905 val = readl(bp->regview + offset);
3906 if ((val & rw_mask) != rw_mask) {
3910 if ((val & ro_mask) != (save_val & ro_mask)) {
3914 writel(save_val, bp->regview + offset);
3918 writel(save_val, bp->regview + offset);
3926 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3928 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3929 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
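/* Walks the window with alternating all-zero, all-one and 0x55/0xaa
 * patterns via indirect register access, reading every word back to
 * verify it stuck.
 */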
3932 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3935 for (offset = 0; offset < size; offset += 4) {
3937 REG_WR_IND(bp, start + offset, test_pattern[i]);
3939 if (REG_RD_IND(bp, start + offset) !=
3949 bnx2_test_memory(struct bnx2 *bp)
3953 static const struct {
3957 { 0x60000, 0x4000 },
3958 { 0xa0000, 0x3000 },
3959 { 0xe0000, 0x4000 },
3960 { 0x120000, 0x4000 },
3961 { 0x1a0000, 0x4000 },
3962 { 0x160000, 0x4000 },
3963 { 0xffffffff, 0 },
3964 };
3966 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3967 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3968 mem_tbl[i].len)) != 0) {
3976 #define BNX2_MAC_LOOPBACK 0
3977 #define BNX2_PHY_LOOPBACK 1
3980 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3982 unsigned int pkt_size, num_pkts, i;
3983 struct sk_buff *skb, *rx_skb;
3984 unsigned char *packet;
3985 u16 rx_start_idx, rx_idx;
3988 struct sw_bd *rx_buf;
3989 struct l2_fhdr *rx_hdr;
3992 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3993 bp->loopback = MAC_LOOPBACK;
3994 bnx2_set_mac_loopback(bp);
3996 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3997 bp->loopback = PHY_LOOPBACK;
3998 bnx2_set_phy_loopback(bp);
4004 skb = netdev_alloc_skb(bp->dev, pkt_size);
4005 if (!skb)
4006 return -ENOMEM;
4007 packet = skb_put(skb, pkt_size);
4008 memcpy(packet, bp->dev->dev_addr, 6);
4009 memset(packet + 6, 0x0, 8);
4010 for (i = 14; i < pkt_size; i++)
4011 packet[i] = (unsigned char) (i & 0xff);
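/* Test frame layout: destination MAC = our own address (so the looped
 * frame is accepted), 8 zero bytes covering source/type, then a
 * byte-index fill pattern that the receive path re-checks below.
 */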
4013 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4016 REG_WR(bp, BNX2_HC_COMMAND,
4017 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4019 REG_RD(bp, BNX2_HC_COMMAND);
4022 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4024 num_pkts = 0;
4026 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4028 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4029 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4030 txbd->tx_bd_mss_nbytes = pkt_size;
4031 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4033 num_pkts++;
4034 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4035 bp->tx_prod_bseq += pkt_size;
4037 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4038 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4042 REG_WR(bp, BNX2_HC_COMMAND,
4043 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4045 REG_RD(bp, BNX2_HC_COMMAND);
4049 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4052 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4053 goto loopback_test_done;
4056 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4057 if (rx_idx != rx_start_idx + num_pkts) {
4058 goto loopback_test_done;
4061 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4062 rx_skb = rx_buf->skb;
4064 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4065 skb_reserve(rx_skb, bp->rx_offset);
4067 pci_dma_sync_single_for_cpu(bp->pdev,
4068 pci_unmap_addr(rx_buf, mapping),
4069 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4071 if (rx_hdr->l2_fhdr_status &
4072 (L2_FHDR_ERRORS_BAD_CRC |
4073 L2_FHDR_ERRORS_PHY_DECODE |
4074 L2_FHDR_ERRORS_ALIGNMENT |
4075 L2_FHDR_ERRORS_TOO_SHORT |
4076 L2_FHDR_ERRORS_GIANT_FRAME)) {
4078 goto loopback_test_done;
4081 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4082 goto loopback_test_done;
4085 for (i = 14; i < pkt_size; i++) {
4086 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4087 goto loopback_test_done;
4098 #define BNX2_MAC_LOOPBACK_FAILED 1
4099 #define BNX2_PHY_LOOPBACK_FAILED 2
4100 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4101 BNX2_PHY_LOOPBACK_FAILED)
4104 bnx2_test_loopback(struct bnx2 *bp)
4108 if (!netif_running(bp->dev))
4109 return BNX2_LOOPBACK_FAILED;
4111 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4112 spin_lock_bh(&bp->phy_lock);
4114 spin_unlock_bh(&bp->phy_lock);
4115 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4116 rc |= BNX2_MAC_LOOPBACK_FAILED;
4117 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4118 rc |= BNX2_PHY_LOOPBACK_FAILED;
4122 #define NVRAM_SIZE 0x200
4123 #define CRC32_RESIDUAL 0xdebb20e3
4126 bnx2_test_nvram(struct bnx2 *bp)
4128 u32 buf[NVRAM_SIZE / 4];
4129 u8 *data = (u8 *) buf;
4133 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4134 goto test_nvram_done;
4136 magic = be32_to_cpu(buf[0]);
4137 if (magic != 0x669955aa) {
4139 goto test_nvram_done;
4142 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4143 goto test_nvram_done;
4145 csum = ether_crc_le(0x100, data);
4146 if (csum != CRC32_RESIDUAL) {
4148 goto test_nvram_done;
4151 csum = ether_crc_le(0x100, data + 0x100);
4152 if (csum != CRC32_RESIDUAL) {
4161 bnx2_test_link(struct bnx2 *bp)
4165 spin_lock_bh(&bp->phy_lock);
4166 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4167 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4168 spin_unlock_bh(&bp->phy_lock);
4170 if (bmsr & BMSR_LSTATUS) {
4177 bnx2_test_intr(struct bnx2 *bp)
4182 if (!netif_running(bp->dev))
4185 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4187 /* This register is not touched during run-time. */
4188 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4189 REG_RD(bp, BNX2_HC_COMMAND);
4191 for (i = 0; i < 10; i++) {
4192 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4193 status_idx) {
4195 return 0;
4196 }
4198 msleep_interruptible(10);
4207 bnx2_5706_serdes_timer(struct bnx2 *bp)
4209 spin_lock(&bp->phy_lock);
4210 if (bp->serdes_an_pending)
4211 bp->serdes_an_pending--;
4212 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4215 bp->current_interval = bp->timer_interval;
4217 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4219 if (bmcr & BMCR_ANENABLE) {
4222 bnx2_write_phy(bp, 0x1c, 0x7c00);
4223 bnx2_read_phy(bp, 0x1c, &phy1);
4225 bnx2_write_phy(bp, 0x17, 0x0f01);
4226 bnx2_read_phy(bp, 0x15, &phy2);
4227 bnx2_write_phy(bp, 0x17, 0x0f01);
4228 bnx2_read_phy(bp, 0x15, &phy2);
4230 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4231 !(phy2 & 0x20)) { /* no CONFIG */
4233 bmcr &= ~BMCR_ANENABLE;
4234 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4235 bnx2_write_phy(bp, MII_BMCR, bmcr);
4236 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4240 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4241 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4244 bnx2_write_phy(bp, 0x17, 0x0f01);
4245 bnx2_read_phy(bp, 0x15, &phy2);
4249 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4250 bmcr |= BMCR_ANENABLE;
4251 bnx2_write_phy(bp, MII_BMCR, bmcr);
4253 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4256 bp->current_interval = bp->timer_interval;
4258 spin_unlock(&bp->phy_lock);
4262 bnx2_5708_serdes_timer(struct bnx2 *bp)
4264 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4265 bp->serdes_an_pending = 0;
4269 spin_lock(&bp->phy_lock);
4270 if (bp->serdes_an_pending)
4271 bp->serdes_an_pending--;
4272 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4275 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4277 if (bmcr & BMCR_ANENABLE) {
4278 bmcr &= ~BMCR_ANENABLE;
4279 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4280 bnx2_write_phy(bp, MII_BMCR, bmcr);
4281 bp->current_interval = SERDES_FORCED_TIMEOUT;
4283 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4284 bmcr |= BMCR_ANENABLE;
4285 bnx2_write_phy(bp, MII_BMCR, bmcr);
4286 bp->serdes_an_pending = 2;
4287 bp->current_interval = bp->timer_interval;
4291 bp->current_interval = bp->timer_interval;
4293 spin_unlock(&bp->phy_lock);
4297 bnx2_timer(unsigned long data)
4299 struct bnx2 *bp = (struct bnx2 *) data;
4302 if (!netif_running(bp->dev))
4305 if (atomic_read(&bp->intr_sem) != 0)
4306 goto bnx2_restart_timer;
4308 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4309 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4311 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4313 if (bp->phy_flags & PHY_SERDES_FLAG) {
4314 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4315 bnx2_5706_serdes_timer(bp);
4316 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4317 bnx2_5708_serdes_timer(bp);
4321 mod_timer(&bp->timer, jiffies + bp->current_interval);
4324 /* Called with rtnl_lock */
4326 bnx2_open(struct net_device *dev)
4328 struct bnx2 *bp = netdev_priv(dev);
4331 bnx2_set_power_state(bp, PCI_D0);
4332 bnx2_disable_int(bp);
4334 rc = bnx2_alloc_mem(bp);
4338 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4339 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4340 !disable_msi) {
4342 if (pci_enable_msi(bp->pdev) == 0) {
4343 bp->flags |= USING_MSI_FLAG;
4344 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4345 dev);
4346 }
4347 else {
4348 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4349 IRQF_SHARED, dev->name, dev);
4353 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4354 dev->name, dev);
4355 }
4361 rc = bnx2_init_nic(bp);
4363 if (rc) {
4364 free_irq(bp->pdev->irq, dev);
4365 if (bp->flags & USING_MSI_FLAG) {
4366 pci_disable_msi(bp->pdev);
4367 bp->flags &= ~USING_MSI_FLAG;
4374 mod_timer(&bp->timer, jiffies + bp->current_interval);
4376 atomic_set(&bp->intr_sem, 0);
4378 bnx2_enable_int(bp);
4380 if (bp->flags & USING_MSI_FLAG) {
4381 /* Test MSI to make sure it is working
4382 * If MSI test fails, go back to INTx mode
4384 if (bnx2_test_intr(bp) != 0) {
4385 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4386 " using MSI, switching to INTx mode. Please"
4387 " report this failure to the PCI maintainer"
4388 " and include system chipset information.\n",
4391 bnx2_disable_int(bp);
4392 free_irq(bp->pdev->irq, dev);
4393 pci_disable_msi(bp->pdev);
4394 bp->flags &= ~USING_MSI_FLAG;
4396 rc = bnx2_init_nic(bp);
4399 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4400 IRQF_SHARED, dev->name, dev);
4405 del_timer_sync(&bp->timer);
4408 bnx2_enable_int(bp);
4411 if (bp->flags & USING_MSI_FLAG) {
4412 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4415 netif_start_queue(dev);
4421 bnx2_reset_task(struct work_struct *work)
4423 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4425 if (!netif_running(bp->dev))
4428 bp->in_reset_task = 1;
4429 bnx2_netif_stop(bp);
4433 atomic_set(&bp->intr_sem, 1);
4434 bnx2_netif_start(bp);
4435 bp->in_reset_task = 0;
4439 bnx2_tx_timeout(struct net_device *dev)
4441 struct bnx2 *bp = netdev_priv(dev);
4443 /* This allows the netif to be shutdown gracefully before resetting */
4444 schedule_work(&bp->reset_task);
4448 /* Called with rtnl_lock */
4450 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4452 struct bnx2 *bp = netdev_priv(dev);
4454 bnx2_netif_stop(bp);
4456 bp->vlgrp = vlgrp;
4457 bnx2_set_rx_mode(dev);
4459 bnx2_netif_start(bp);
4462 /* Called with rtnl_lock */
4464 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4466 struct bnx2 *bp = netdev_priv(dev);
4468 bnx2_netif_stop(bp);
4470 if (bp->vlgrp)
4471 bp->vlgrp->vlan_devices[vid] = NULL;
4472 bnx2_set_rx_mode(dev);
4474 bnx2_netif_start(bp);
4478 /* Called with netif_tx_lock.
4479 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4480 * netif_wake_queue().
4483 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4485 struct bnx2 *bp = netdev_priv(dev);
4488 struct sw_bd *tx_buf;
4489 u32 len, vlan_tag_flags, last_frag, mss;
4490 u16 prod, ring_prod;
4493 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4494 netif_stop_queue(dev);
4495 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4498 return NETDEV_TX_BUSY;
4500 len = skb_headlen(skb);
4501 prod = bp->tx_prod;
4502 ring_prod = TX_RING_IDX(prod);
4504 vlan_tag_flags = 0;
4505 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4506 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4509 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4511 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4514 if ((mss = skb_shinfo(skb)->gso_size) &&
4515 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4516 u32 tcp_opt_len, ip_tcp_len;
4518 if (skb_header_cloned(skb) &&
4519 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4520 dev_kfree_skb(skb);
4521 return NETDEV_TX_OK;
4522 }
4524 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4525 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4528 if (skb->h.th->doff > 5) {
4529 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4530 }
4531 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4533 skb->nh.iph->check = 0;
4534 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4535 skb->h.th->check =
4536 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4537 skb->nh.iph->daddr,
4538 0, IPPROTO_TCP, 0);
4540 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4541 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4542 (tcp_opt_len >> 2)) << 8;
4543 }
4544 }
4545 else {
4546 mss = 0;
4547 }
4551 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4553 tx_buf = &bp->tx_buf_ring[ring_prod];
4555 pci_unmap_addr_set(tx_buf, mapping, mapping);
4557 txbd = &bp->tx_desc_ring[ring_prod];
4559 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4560 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4561 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4562 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4564 last_frag = skb_shinfo(skb)->nr_frags;
4566 for (i = 0; i < last_frag; i++) {
4567 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4569 prod = NEXT_TX_BD(prod);
4570 ring_prod = TX_RING_IDX(prod);
4571 txbd = &bp->tx_desc_ring[ring_prod];
4574 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4575 len, PCI_DMA_TODEVICE);
4576 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4579 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4580 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4581 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4582 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4585 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4587 prod = NEXT_TX_BD(prod);
4588 bp->tx_prod_bseq += skb->len;
4590 REG_WR16(bp, bp->tx_bidx_addr, prod);
4591 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4593 bp->tx_prod = prod;
4596 dev->trans_start = jiffies;
4598 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4599 netif_stop_queue(dev);
4600 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4601 netif_wake_queue(dev);
4604 return NETDEV_TX_OK;
4607 /* Called with rtnl_lock */
4609 bnx2_close(struct net_device *dev)
4611 struct bnx2 *bp = netdev_priv(dev);
4614 /* Calling flush_scheduled_work() may deadlock because
4615 * linkwatch_event() may be on the workqueue and it will try to get
4616 * the rtnl_lock which we are holding.
4618 while (bp->in_reset_task)
4621 bnx2_netif_stop(bp);
4622 del_timer_sync(&bp->timer);
4623 if (bp->flags & NO_WOL_FLAG)
4624 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4626 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4628 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4629 bnx2_reset_chip(bp, reset_code);
4630 free_irq(bp->pdev->irq, dev);
4631 if (bp->flags & USING_MSI_FLAG) {
4632 pci_disable_msi(bp->pdev);
4633 bp->flags &= ~USING_MSI_FLAG;
4634 }
4635 bnx2_free_skbs(bp);
4636 bnx2_free_mem(bp);
4637 bp->link_up = 0;
4638 netif_carrier_off(bp->dev);
4639 bnx2_set_power_state(bp, PCI_D3hot);
4641 return 0;
4642 }
4643 #define GET_NET_STATS64(ctr) \
4644 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4645 (unsigned long) (ctr##_lo)
4647 #define GET_NET_STATS32(ctr) \
4648 (ctr##_lo)
4650 #if (BITS_PER_LONG == 64)
4651 #define GET_NET_STATS GET_NET_STATS64
4653 #define GET_NET_STATS GET_NET_STATS32
4654 #endif
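/* Hardware counters are kept as 64-bit hi/lo register pairs.  On
 * 64-bit hosts both halves are combined; on 32-bit hosts only the low
 * 32 bits are reported, since unsigned long cannot hold more.
 */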
4656 static struct net_device_stats *
4657 bnx2_get_stats(struct net_device *dev)
4659 struct bnx2 *bp = netdev_priv(dev);
4660 struct statistics_block *stats_blk = bp->stats_blk;
4661 struct net_device_stats *net_stats = &bp->net_stats;
4663 if (bp->stats_blk == NULL) {
4664 return net_stats;
4665 }
4666 net_stats->rx_packets =
4667 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4668 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4669 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4671 net_stats->tx_packets =
4672 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4673 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4674 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4676 net_stats->rx_bytes =
4677 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4679 net_stats->tx_bytes =
4680 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4682 net_stats->multicast =
4683 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4685 net_stats->collisions =
4686 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4688 net_stats->rx_length_errors =
4689 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4690 stats_blk->stat_EtherStatsOverrsizePkts);
4692 net_stats->rx_over_errors =
4693 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4695 net_stats->rx_frame_errors =
4696 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4698 net_stats->rx_crc_errors =
4699 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4701 net_stats->rx_errors = net_stats->rx_length_errors +
4702 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4703 net_stats->rx_crc_errors;
4705 net_stats->tx_aborted_errors =
4706 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4707 stats_blk->stat_Dot3StatsLateCollisions);
4709 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4710 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4711 net_stats->tx_carrier_errors = 0;
4713 net_stats->tx_carrier_errors =
4714 (unsigned long)
4715 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4718 net_stats->tx_errors =
4719 (unsigned long)
4720 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4721 +
4722 net_stats->tx_aborted_errors +
4723 net_stats->tx_carrier_errors;
4725 net_stats->rx_missed_errors =
4726 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4727 stats_blk->stat_FwRxDrop);
4729 return net_stats;
4730 }
4732 /* All ethtool functions called with rtnl_lock */
4735 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4737 struct bnx2 *bp = netdev_priv(dev);
4739 cmd->supported = SUPPORTED_Autoneg;
4740 if (bp->phy_flags & PHY_SERDES_FLAG) {
4741 cmd->supported |= SUPPORTED_1000baseT_Full |
4742 SUPPORTED_FIBRE;
4744 cmd->port = PORT_FIBRE;
4747 cmd->supported |= SUPPORTED_10baseT_Half |
4748 SUPPORTED_10baseT_Full |
4749 SUPPORTED_100baseT_Half |
4750 SUPPORTED_100baseT_Full |
4751 SUPPORTED_1000baseT_Full |
4752 SUPPORTED_TP;
4754 cmd->port = PORT_TP;
4757 cmd->advertising = bp->advertising;
4759 if (bp->autoneg & AUTONEG_SPEED) {
4760 cmd->autoneg = AUTONEG_ENABLE;
4761 }
4762 else {
4763 cmd->autoneg = AUTONEG_DISABLE;
4764 }
4766 if (netif_carrier_ok(dev)) {
4767 cmd->speed = bp->line_speed;
4768 cmd->duplex = bp->duplex;
4775 cmd->transceiver = XCVR_INTERNAL;
4776 cmd->phy_address = bp->phy_addr;
4782 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4784 struct bnx2 *bp = netdev_priv(dev);
4785 u8 autoneg = bp->autoneg;
4786 u8 req_duplex = bp->req_duplex;
4787 u16 req_line_speed = bp->req_line_speed;
4788 u32 advertising = bp->advertising;
4790 if (cmd->autoneg == AUTONEG_ENABLE) {
4791 autoneg |= AUTONEG_SPEED;
4793 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4795 /* allow advertising 1 speed */
4796 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4797 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4798 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4799 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4801 if (bp->phy_flags & PHY_SERDES_FLAG)
4802 return -EINVAL;
4804 advertising = cmd->advertising;
4807 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4808 advertising = cmd->advertising;
4810 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4811 return -EINVAL;
4812 }
4813 else {
4814 if (bp->phy_flags & PHY_SERDES_FLAG) {
4815 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4818 advertising = ETHTOOL_ALL_COPPER_SPEED;
4821 advertising |= ADVERTISED_Autoneg;
4824 if (bp->phy_flags & PHY_SERDES_FLAG) {
4825 if ((cmd->speed != SPEED_1000 &&
4826 cmd->speed != SPEED_2500) ||
4827 (cmd->duplex != DUPLEX_FULL))
4828 return -EINVAL;
4830 if (cmd->speed == SPEED_2500 &&
4831 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4832 return -EINVAL;
4833 }
4834 else if (cmd->speed == SPEED_1000) {
4835 return -EINVAL;
4836 }
4837 autoneg &= ~AUTONEG_SPEED;
4838 req_line_speed = cmd->speed;
4839 req_duplex = cmd->duplex;
4843 bp->autoneg = autoneg;
4844 bp->advertising = advertising;
4845 bp->req_line_speed = req_line_speed;
4846 bp->req_duplex = req_duplex;
4848 spin_lock_bh(&bp->phy_lock);
4852 spin_unlock_bh(&bp->phy_lock);
4858 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4860 struct bnx2 *bp = netdev_priv(dev);
4862 strcpy(info->driver, DRV_MODULE_NAME);
4863 strcpy(info->version, DRV_MODULE_VERSION);
4864 strcpy(info->bus_info, pci_name(bp->pdev));
4865 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4866 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4867 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4868 info->fw_version[1] = info->fw_version[3] = '.';
4869 info->fw_version[5] = 0;
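/* bp->fw_ver packs one version component per byte, e.g. 0x01040000 is
 * reported as the string "1.4.0" (single-digit components only).
 */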
4872 #define BNX2_REGDUMP_LEN (32 * 1024)
4875 bnx2_get_regs_len(struct net_device *dev)
4877 return BNX2_REGDUMP_LEN;
4881 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4883 u32 *p = _p, i, offset;
4885 struct bnx2 *bp = netdev_priv(dev);
4886 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4887 0x0800, 0x0880, 0x0c00, 0x0c10,
4888 0x0c30, 0x0d08, 0x1000, 0x101c,
4889 0x1040, 0x1048, 0x1080, 0x10a4,
4890 0x1400, 0x1490, 0x1498, 0x14f0,
4891 0x1500, 0x155c, 0x1580, 0x15dc,
4892 0x1600, 0x1658, 0x1680, 0x16d8,
4893 0x1800, 0x1820, 0x1840, 0x1854,
4894 0x1880, 0x1894, 0x1900, 0x1984,
4895 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4896 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4897 0x2000, 0x2030, 0x23c0, 0x2400,
4898 0x2800, 0x2820, 0x2830, 0x2850,
4899 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4900 0x3c00, 0x3c94, 0x4000, 0x4010,
4901 0x4080, 0x4090, 0x43c0, 0x4458,
4902 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4903 0x4fc0, 0x5010, 0x53c0, 0x5444,
4904 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4905 0x5fc0, 0x6000, 0x6400, 0x6428,
4906 0x6800, 0x6848, 0x684c, 0x6860,
4907 0x6888, 0x6910, 0x8000 };
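/* reg_boundaries lists [start, end) pairs of implemented register
 * ranges; the dump walks each range and skips the holes, which stay
 * zero from the memset below.
 */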
4911 memset(p, 0, BNX2_REGDUMP_LEN);
4913 if (!netif_running(bp->dev))
4917 offset = reg_boundaries[0];
4919 while (offset < BNX2_REGDUMP_LEN) {
4920 *p++ = REG_RD(bp, offset);
4922 if (offset == reg_boundaries[i + 1]) {
4923 offset = reg_boundaries[i + 2];
4924 p = (u32 *) (orig_p + offset);
4931 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4933 struct bnx2 *bp = netdev_priv(dev);
4935 if (bp->flags & NO_WOL_FLAG) {
4940 wol->supported = WAKE_MAGIC;
4941 if (bp->wol)
4942 wol->wolopts = WAKE_MAGIC;
4943 else
4944 wol->wolopts = 0;
4946 memset(&wol->sopass, 0, sizeof(wol->sopass));
4950 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4952 struct bnx2 *bp = netdev_priv(dev);
4954 if (wol->wolopts & ~WAKE_MAGIC)
4957 if (wol->wolopts & WAKE_MAGIC) {
4958 if (bp->flags & NO_WOL_FLAG)
4959 return -EINVAL;
4961 bp->wol = 1;
4962 }
4963 else {
4964 bp->wol = 0;
4965 }
4966 return 0;
4967 }
4970 bnx2_nway_reset(struct net_device *dev)
4972 struct bnx2 *bp = netdev_priv(dev);
4975 if (!(bp->autoneg & AUTONEG_SPEED)) {
4979 spin_lock_bh(&bp->phy_lock);
4981 /* Force a link down visible on the other side */
4982 if (bp->phy_flags & PHY_SERDES_FLAG) {
4983 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4984 spin_unlock_bh(&bp->phy_lock);
4988 spin_lock_bh(&bp->phy_lock);
4990 bp->current_interval = SERDES_AN_TIMEOUT;
4991 bp->serdes_an_pending = 1;
4992 mod_timer(&bp->timer, jiffies + bp->current_interval);
4995 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4996 bmcr &= ~BMCR_LOOPBACK;
4997 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4999 spin_unlock_bh(&bp->phy_lock);
5005 bnx2_get_eeprom_len(struct net_device *dev)
5007 struct bnx2 *bp = netdev_priv(dev);
5009 if (bp->flash_info == NULL)
5010 return 0;
5012 return (int) bp->flash_size;
5016 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5019 struct bnx2 *bp = netdev_priv(dev);
5022 /* parameters already validated in ethtool_get_eeprom */
5024 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5030 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5033 struct bnx2 *bp = netdev_priv(dev);
5036 /* parameters already validated in ethtool_set_eeprom */
5038 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5044 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5046 struct bnx2 *bp = netdev_priv(dev);
5048 memset(coal, 0, sizeof(struct ethtool_coalesce));
5050 coal->rx_coalesce_usecs = bp->rx_ticks;
5051 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5052 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5053 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5055 coal->tx_coalesce_usecs = bp->tx_ticks;
5056 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5057 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5058 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5060 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5066 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5068 struct bnx2 *bp = netdev_priv(dev);
5070 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5071 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5073 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5074 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5076 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5077 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5079 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5080 if (bp->rx_quick_cons_trip_int > 0xff)
5081 bp->rx_quick_cons_trip_int = 0xff;
5083 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5084 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5086 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5087 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5089 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5090 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5092 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5093 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5096 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5097 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5098 bp->stats_ticks &= 0xffff00;
5100 if (netif_running(bp->dev)) {
5101 bnx2_netif_stop(bp);
5103 bnx2_netif_start(bp);
5110 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5112 struct bnx2 *bp = netdev_priv(dev);
5114 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5115 ering->rx_mini_max_pending = 0;
5116 ering->rx_jumbo_max_pending = 0;
5118 ering->rx_pending = bp->rx_ring_size;
5119 ering->rx_mini_pending = 0;
5120 ering->rx_jumbo_pending = 0;
5122 ering->tx_max_pending = MAX_TX_DESC_CNT;
5123 ering->tx_pending = bp->tx_ring_size;
5127 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5129 struct bnx2 *bp = netdev_priv(dev);
5131 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5132 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5133 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5137 if (netif_running(bp->dev)) {
5138 bnx2_netif_stop(bp);
5139 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5144 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5145 bp->tx_ring_size = ering->tx_pending;
5147 if (netif_running(bp->dev)) {
5150 rc = bnx2_alloc_mem(bp);
5154 bnx2_netif_start(bp);
5161 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5163 struct bnx2 *bp = netdev_priv(dev);
5165 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5166 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5167 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5171 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5173 struct bnx2 *bp = netdev_priv(dev);
5175 bp->req_flow_ctrl = 0;
5176 if (epause->rx_pause)
5177 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5178 if (epause->tx_pause)
5179 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5181 if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}
	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
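/* Usage sketch (illustrative): flow control maps onto `ethtool -A`,
 * e.g. `ethtool -A eth0 autoneg on rx on tx on` with "eth0" a
 * placeholder.  With autoneg enabled, the requested rx/tx settings are
 * advertised and the resolved state is reported back by
 * bnx2_get_pauseparam from bp->flow_ctrl.
 */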
5198 bnx2_get_rx_csum(struct net_device *dev)
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
5206 bnx2_set_rx_csum(struct net_device *dev, u32 data)
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
5215 bnx2_set_tso(struct net_device *dev, u32 data)
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
5224 #define BNX2_NUM_STATS 46
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
5233 { "rx_ucast_packets" },
5234 { "rx_mcast_packets" },
5235 { "rx_bcast_packets" },
5236 { "tx_ucast_packets" },
5237 { "tx_mcast_packets" },
5238 { "tx_bcast_packets" },
5239 { "tx_mac_errors" },
5240 { "tx_carrier_errors" },
5241 { "rx_crc_errors" },
5242 { "rx_align_errors" },
5243 { "tx_single_collisions" },
5244 { "tx_multi_collisions" },
5246 { "tx_excess_collisions" },
5247 { "tx_late_collisions" },
5248 { "tx_total_collisions" },
5251 { "rx_undersize_packets" },
5252 { "rx_oversize_packets" },
5253 { "rx_64_byte_packets" },
5254 { "rx_65_to_127_byte_packets" },
5255 { "rx_128_to_255_byte_packets" },
5256 { "rx_256_to_511_byte_packets" },
5257 { "rx_512_to_1023_byte_packets" },
5258 { "rx_1024_to_1522_byte_packets" },
5259 { "rx_1523_to_9022_byte_packets" },
5260 { "tx_64_byte_packets" },
5261 { "tx_65_to_127_byte_packets" },
5262 { "tx_128_to_255_byte_packets" },
5263 { "tx_256_to_511_byte_packets" },
5264 { "tx_512_to_1023_byte_packets" },
5265 { "tx_1024_to_1522_byte_packets" },
5266 { "tx_1523_to_9022_byte_packets" },
5267 { "rx_xon_frames" },
5268 { "rx_xoff_frames" },
5269 { "tx_xon_frames" },
5270 { "tx_xoff_frames" },
5271 { "rx_mac_ctrl_frames" },
5272 { "rx_filtered_packets" },
5274 { "rx_fw_discards" },
5277 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
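/* Worked example (illustrative field placement): STATS_OFFSET32 turns a
 * byte offset inside struct statistics_block into an index of 32-bit
 * words, matching the u32 * view used in bnx2_get_ethtool_stats below.
 * If stat_IfHCInOctets_hi were at byte offset 0 it would map to index 0,
 * and a field at byte offset 8 would map to index 2.
 */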
5279 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5280 STATS_OFFSET32(stat_IfHCInOctets_hi),
5281 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5282 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5283 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5284 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5285 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5286 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5287 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5288 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5289 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5290 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5291 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5292 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5293 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5294 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5295 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5296 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5297 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5298 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5299 STATS_OFFSET32(stat_EtherStatsCollisions),
5300 STATS_OFFSET32(stat_EtherStatsFragments),
5301 STATS_OFFSET32(stat_EtherStatsJabbers),
5302 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5303 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5304 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5305 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5306 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5307 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5308 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5309 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5310 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5311 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5312 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5313 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5314 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5315 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5316 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5317 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5318 STATS_OFFSET32(stat_XonPauseFramesReceived),
5319 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5320 STATS_OFFSET32(stat_OutXonSent),
5321 STATS_OFFSET32(stat_OutXoffSent),
5322 STATS_OFFSET32(stat_MacControlFramesReceived),
5323 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5324 STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
5328 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
5331 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5332 8,0,8,8,8,8,8,8,8,8,
5333 4,0,4,4,4,4,4,4,4,4,
5334 4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5339 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5340 8,0,8,8,8,8,8,8,8,8,
5341 4,4,4,4,4,4,4,4,4,4,
5342 4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5347 #define BNX2_NUM_TESTS 6
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5352 { "register_test (offline)" },
5353 { "memory_test (offline)" },
5354 { "loopback_test (offline)" },
5355 { "nvram_test (online)" },
5356 { "interrupt_test (online)" },
5357 { "link_test (online)" },
5361 bnx2_self_test_count(struct net_device *dev)
5363 return BNX2_NUM_TESTS;
5367 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5369 struct bnx2 *bp = netdev_priv(dev);
5371 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5372 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5375 bnx2_netif_stop(bp);
5376 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
5387 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5388 etest->flags |= ETH_TEST_FL_FAILED;
		if (!netif_running(bp->dev))
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}
		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
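/* Usage sketch (illustrative): `ethtool -t eth0 offline` runs all six
 * tests (the interface is reset around the offline ones), while
 * `ethtool -t eth0 online` runs only the nvram, interrupt, and link
 * tests.  "eth0" is a placeholder; buf[0..5] map to the test-name
 * strings above.
 */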
5423 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
5438 bnx2_get_stats_count(struct net_device *dev)
5440 return BNX2_NUM_STATS;
5444 bnx2_get_ethtool_stats(struct net_device *dev,
5445 struct ethtool_stats *stats, u64 *buf)
5447 struct bnx2 *bp = netdev_priv(dev);
5449 u32 *hw_stats = (u32 *) bp->stats_blk;
5450 u8 *stats_len_arr = NULL;
5452 if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
5457 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5458 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5459 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5460 (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;
5465 for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
5477 /* 8-byte counter */
5478 buf[i] = (((u64) *(hw_stats +
5479 bnx2_stats_offset_arr[i])) << 32) +
5480 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
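/* Illustrative sketch (not part of the driver): each 64-bit counter is
 * kept as two consecutive 32-bit words, high word first, so the
 * reconstruction above could be factored as follows; bnx2_read_stat64
 * is a hypothetical name.
 *
 *	static inline u64 bnx2_read_stat64(u32 *hw_stats, unsigned long off)
 *	{
 *		return ((u64) hw_stats[off] << 32) + hw_stats[off + 1];
 *	}
 */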
5485 bnx2_phys_id(struct net_device *dev, u32 data)
5487 struct bnx2 *bp = netdev_priv(dev);
5494 save = REG_RD(bp, BNX2_MISC_CFG);
5495 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
5513 REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	return 0;
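/* Usage sketch (illustrative): `ethtool -p eth0 5` calls this hook with
 * data == 5 and blinks the port LED for roughly 5 seconds (data * 2
 * half-second on/off phases).  "eth0" is a placeholder name.
 */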
5518 static const struct ethtool_ops bnx2_ethtool_ops = {
5519 .get_settings = bnx2_get_settings,
5520 .set_settings = bnx2_set_settings,
5521 .get_drvinfo = bnx2_get_drvinfo,
5522 .get_regs_len = bnx2_get_regs_len,
5523 .get_regs = bnx2_get_regs,
5524 .get_wol = bnx2_get_wol,
5525 .set_wol = bnx2_set_wol,
5526 .nway_reset = bnx2_nway_reset,
5527 .get_link = ethtool_op_get_link,
5528 .get_eeprom_len = bnx2_get_eeprom_len,
5529 .get_eeprom = bnx2_get_eeprom,
5530 .set_eeprom = bnx2_set_eeprom,
5531 .get_coalesce = bnx2_get_coalesce,
5532 .set_coalesce = bnx2_set_coalesce,
5533 .get_ringparam = bnx2_get_ringparam,
5534 .set_ringparam = bnx2_set_ringparam,
5535 .get_pauseparam = bnx2_get_pauseparam,
5536 .set_pauseparam = bnx2_set_pauseparam,
5537 .get_rx_csum = bnx2_get_rx_csum,
5538 .set_rx_csum = bnx2_set_rx_csum,
5539 .get_tx_csum = ethtool_op_get_tx_csum,
5540 .set_tx_csum = ethtool_op_set_tx_csum,
5541 .get_sg = ethtool_op_get_sg,
5542 .set_sg = ethtool_op_set_sg,
5544 .get_tso = ethtool_op_get_tso,
5545 .set_tso = bnx2_set_tso,
5547 .self_test_count = bnx2_self_test_count,
5548 .self_test = bnx2_self_test,
5549 .get_strings = bnx2_get_strings,
5550 .phys_id = bnx2_phys_id,
5551 .get_stats_count = bnx2_get_stats_count,
5552 .get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
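/* Usage note (illustrative): the ethtool core combines several of these
 * hooks per command; `ethtool -S eth0`, for example, sizes its buffers
 * with get_stats_count, fetches names via get_strings(ETH_SS_STATS),
 * and values via get_ethtool_stats.  "eth0" is a placeholder.
 */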
5556 /* Called with rtnl_lock */
5558 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5560 struct mii_ioctl_data *data = if_mii(ifr);
5561 struct bnx2 *bp = netdev_priv(dev);
	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;
		/* fallthrough */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;
		return err;
	}
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);
		return err;
	}
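/* Usage sketch (illustrative): userspace reaches these cases through
 * the standard MII ioctls; `mii-tool -v eth0` issues SIOCGMIIPHY and
 * then SIOCGMIIREG reads of the PHY registers, while writes via
 * SIOCSMIIREG require CAP_NET_ADMIN, as enforced above.  "eth0" is a
 * placeholder interface name.
 */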
5598 /* Called with rtnl_lock */
5600 bnx2_change_mac_addr(struct net_device *dev, void *p)
5602 struct sockaddr *addr = p;
5603 struct bnx2 *bp = netdev_priv(dev);
	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;
5608 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
5615 /* Called with rtnl_lock */
5617 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5619 struct bnx2 *bp = netdev_priv(dev);
5621 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
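/* Usage sketch (illustrative): `ip link set eth0 mtu 9000` lands here;
 * anything from MIN_ETHERNET_PACKET_SIZE up to
 * MAX_ETHERNET_JUMBO_PACKET_SIZE (both counted with the Ethernet
 * header) is accepted, and a running interface is re-initialized so the
 * new rx buffer sizing takes effect.  "eth0" is a placeholder.
 */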
5636 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5638 poll_bnx2(struct net_device *dev)
5640 struct bnx2 *bp = netdev_priv(dev);
5642 disable_irq(bp->pdev->irq);
5643 bnx2_interrupt(bp->pdev->irq, dev);
5644 enable_irq(bp->pdev->irq);
5648 static int __devinit
5649 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5652 unsigned long mem_len;
5656 SET_MODULE_OWNER(dev);
5657 SET_NETDEV_DEV(dev, &pdev->dev);
5658 bp = netdev_priv(dev);
5663 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5664 rc = pci_enable_device(pdev);
5666 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}
	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}
5683 pci_set_master(pdev);
	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
5711 spin_lock_init(&bp->phy_lock);
5712 INIT_WORK(&bp->reset_task, bnx2_reset_task);
5714 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5715 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5716 dev->mem_end = dev->mem_start + mem_len;
5717 dev->irq = pdev->irq;
	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big-endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
5731 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5732 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5733 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5735 bnx2_set_power_state(bp, PCI_D0);
5737 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}
5749 /* Get bus information. */
5750 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5751 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5754 bp->flags |= PCIX_FLAG;
5756 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;

		switch (clkreg) {
5760 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5761 bp->bus_speed_mhz = 133;
5764 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5765 bp->bus_speed_mhz = 100;
5768 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5769 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5770 bp->bus_speed_mhz = 66;
5773 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5774 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5775 bp->bus_speed_mhz = 50;
5778 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5779 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5780 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5781 bp->bus_speed_mhz = 33;
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}
5792 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5793 bp->flags |= PCI_32BIT_FLAG;
5795 /* 5706A0 may falsely detect SERR and PERR. */
5796 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5797 reg = REG_RD(bp, PCI_COMMAND);
5798 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5799 REG_WR(bp, PCI_COMMAND, reg);
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used on a PCI-X bus, aborting.\n");
		goto err_out_unmap;
	}
5809 bnx2_init_nvram(bp);
5811 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5813 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5814 BNX2_SHM_HDR_SIGNATURE_SIG)
5815 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5817 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5819 /* Get the permanent MAC address. First we need to make sure the
5820 * firmware is actually running.
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}
5831 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5833 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5834 bp->mac_addr[0] = (u8) (reg >> 8);
5835 bp->mac_addr[1] = (u8) reg;
5837 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5838 bp->mac_addr[2] = (u8) (reg >> 24);
5839 bp->mac_addr[3] = (u8) (reg >> 16);
5840 bp->mac_addr[4] = (u8) (reg >> 8);
5841 bp->mac_addr[5] = (u8) reg;
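/* Worked example (illustrative address): for a permanent MAC of
 * 00:10:18:01:02:03, MAC_UPPER would read 0x0010 (bytes 0-1 in its low
 * 16 bits) and MAC_LOWER would read 0x18010203 (bytes 2-5, most
 * significant byte first), which the shifts above unpack in order.
 */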
5843 bp->tx_ring_size = MAX_TX_DESC_CNT;
5844 bnx2_set_rx_ring_size(bp, 255);
5848 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5850 bp->tx_quick_cons_trip_int = 20;
5851 bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;
5855 bp->rx_quick_cons_trip_int = 6;
5856 bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;
5860 bp->stats_ticks = 1000000 & 0xffff00;
5862 bp->timer_interval = HZ;
5863 bp->current_interval = HZ;
5867 /* Disable WOL support if we are running on a SERDES chip. */
5868 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5869 if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5870 bp->phy_flags |= PHY_SERDES_FLAG;
5871 } else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
5872 bp->phy_flags |= PHY_SERDES_FLAG;
5874 if (bp->phy_flags & PHY_SERDES_FLAG) {
5875 bp->flags |= NO_WOL_FLAG;
5876 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
5880 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5881 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5885 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5886 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5887 (CHIP_ID(bp) == CHIP_ID_5708_B1))
5888 bp->flags |= NO_WOL_FLAG;
5890 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5891 bp->tx_quick_cons_trip_int =
5892 bp->tx_quick_cons_trip;
5893 bp->tx_ticks_int = bp->tx_ticks;
5894 bp->rx_quick_cons_trip_int =
5895 bp->rx_quick_cons_trip;
5896 bp->rx_ticks_int = bp->rx_ticks;
5897 bp->comp_prod_trip_int = bp->comp_prod_trip;
5898 bp->com_ticks_int = bp->com_ticks;
5899 bp->cmd_ticks_int = bp->cmd_ticks;
5902 /* Disable MSI on 5706 if AMD 8132 bridge is found.
 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
5905 * with byte enables disabled on the unused 32-bit word. This is legal
5906 * but causes problems on the AMD 8132 which will eventually stop
5907 * responding after a while.
5909 * AMD believes this incompatibility is unique to the 5706, and
5910 * prefers to locally disable MSI rather than globally disabling it
5911 * using pci_msi_quirk.
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
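/* Usage sketch (illustrative): the same fallback can be forced by hand
 * through the module parameter declared near the top of this file,
 * e.g. `modprobe bnx2 disable_msi=1`, which makes the driver use legacy
 * INTx interrupts on all boards.
 */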
5930 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5931 bp->req_line_speed = 0;
5932 if (bp->phy_flags & PHY_SERDES_FLAG) {
5933 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5935 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5936 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}
5947 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5949 init_timer(&bp->timer);
5950 bp->timer.expires = RUN_AT(bp->timer_interval);
5951 bp->timer.data = (unsigned long) bp;
5952 bp->timer.function = bnx2_timer;
	return 0;

err_out_unmap:
	iounmap(bp->regview);
err_out_release:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return rc;
5973 static int __devinit
5974 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5976 static int version_printed = 0;
5977 struct net_device *dev = NULL;
5981 if (version_printed++ == 0)
5982 printk(KERN_INFO "%s", version);
5984 /* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
5996 dev->open = bnx2_open;
5997 dev->hard_start_xmit = bnx2_start_xmit;
5998 dev->stop = bnx2_close;
5999 dev->get_stats = bnx2_get_stats;
6000 dev->set_multicast_list = bnx2_set_rx_mode;
6001 dev->do_ioctl = bnx2_ioctl;
6002 dev->set_mac_address = bnx2_change_mac_addr;
6003 dev->change_mtu = bnx2_change_mtu;
6004 dev->tx_timeout = bnx2_tx_timeout;
6005 dev->watchdog_timeo = TX_TIMEOUT;
6007 dev->vlan_rx_register = bnx2_vlan_rx_register;
6008 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6010 dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;
6014 bp = netdev_priv(dev);
6016 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6017 dev->poll_controller = poll_bnx2;
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}
6031 pci_set_drvdata(pdev, dev);
6033 memcpy(dev->dev_addr, bp->mac_addr, 6);
6034 memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ", dev->name, bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz, dev->base_addr, dev->irq);
6048 printk("node addr ");
6049 for (i = 0; i < 6; i++)
6050 printk("%2.2x", dev->dev_addr[i]);
6053 dev->features |= NETIF_F_SG;
6054 if (bp->flags & USING_DAC_FLAG)
6055 dev->features |= NETIF_F_HIGHDMA;
6056 dev->features |= NETIF_F_IP_CSUM;
6058 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6061 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	netif_carrier_off(bp->dev);

	return 0;
6069 static void __devexit
6070 bnx2_remove_one(struct pci_dev *pdev)
6072 struct net_device *dev = pci_get_drvdata(pdev);
6073 struct bnx2 *bp = netdev_priv(dev);
6075 flush_scheduled_work();
	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
6083 pci_release_regions(pdev);
6084 pci_disable_device(pdev);
6085 pci_set_drvdata(pdev, NULL);
6089 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6091 struct net_device *dev = pci_get_drvdata(pdev);
6092 struct bnx2 *bp = netdev_priv(dev);
	if (!netif_running(dev))
		return 0;
6098 flush_scheduled_work();
6099 bnx2_netif_stop(bp);
6100 netif_device_detach(dev);
6101 del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));

	return 0;
6115 bnx2_resume(struct pci_dev *pdev)
6117 struct net_device *dev = pci_get_drvdata(pdev);
6118 struct bnx2 *bp = netdev_priv(dev);
	if (!netif_running(dev))
		return 0;
6123 bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);

	return 0;
6130 static struct pci_driver bnx2_pci_driver = {
6131 .name = DRV_MODULE_NAME,
6132 .id_table = bnx2_pci_tbl,
6133 .probe = bnx2_init_one,
6134 .remove = __devexit_p(bnx2_remove_one),
6135 .suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
6139 static int __init bnx2_init(void)
6141 return pci_register_driver(&bnx2_pci_driver);
6144 static void __exit bnx2_cleanup(void)
6146 pci_unregister_driver(&bnx2_pci_driver);
6149 module_init(bnx2_init);
6150 module_exit(bnx2_cleanup);