1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
12 #include <linux/config.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <asm/bitops.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
56 #define DRV_MODULE_NAME "bnx2"
57 #define PFX DRV_MODULE_NAME ": "
58 #define DRV_MODULE_VERSION "1.4.39"
59 #define DRV_MODULE_RELDATE "March 22, 2006"
61 #define RUN_AT(x) (jiffies + (x))
63 /* Time in jiffies before concluding the transmitter is hung. */
64 #define TX_TIMEOUT (5*HZ)
66 static char version[] __devinitdata =
67 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
70 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_MODULE_VERSION);
74 static int disable_msi = 0;
76 module_param(disable_msi, int, 0);
77 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
89 /* indexed by board_t, above */
static struct {
char *name;
} board_info[] __devinitdata = {
93 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
94 { "HP NC370T Multifunction Gigabit Server Adapter" },
95 { "HP NC370i Multifunction Gigabit Server Adapter" },
96 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
97 { "HP NC370F Multifunction Gigabit Server Adapter" },
98 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
102 static struct pci_device_id bnx2_pci_tbl[] = {
103 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
104 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
105 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
106 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
107 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
108 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
109 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
111 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
112 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120 static struct flash_spec flash_table[] =
123 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
124 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
125 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
127 /* Expansion entry 0001 */
128 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
129 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
130 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
132 /* Saifun SA25F010 (non-buffered flash) */
133 /* strap, cfg1, & write1 need updates */
134 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
135 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
136 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
137 "Non-buffered flash (128kB)"},
138 /* Saifun SA25F020 (non-buffered flash) */
139 /* strap, cfg1, & write1 need updates */
140 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
141 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
142 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
143 "Non-buffered flash (256kB)"},
144 /* Expansion entry 0100 */
145 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
146 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
149 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
150 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
151 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
152 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
"Entry 0101: ST M45PE10 (128kB non-buffered)"},
154 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
155 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
156 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
157 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
"Entry 0110: ST M45PE20 (256kB non-buffered)"},
159 /* Saifun SA25F005 (non-buffered flash) */
160 /* strap, cfg1, & write1 need updates */
161 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
162 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
163 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
164 "Non-buffered flash (64kB)"},
166 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
167 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
168 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
170 /* Expansion entry 1001 */
171 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
172 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
175 /* Expansion entry 1010 */
176 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
177 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 /* ATMEL AT45DB011B (buffered flash) */
181 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
182 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
183 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
184 "Buffered flash (128kB)"},
185 /* Expansion entry 1100 */
186 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
187 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 /* Expansion entry 1101 */
191 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
192 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
/* Atmel Expansion entry 1110 */
196 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
197 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
198 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
199 "Entry 1110 (Atmel)"},
200 /* ATMEL AT45DB021B (buffered flash) */
201 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
202 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
203 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
204 "Buffered flash (256kB)"},
207 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
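/* Return the number of free TX descriptors. The producer and consumer
* indices are masked to the ring size, and the difference is adjusted
* when the producer has wrapped around behind the consumer.
*/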
209 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
211 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
213 if (diff > MAX_TX_DESC_CNT)
214 diff = (diff & MAX_TX_DESC_CNT) - 1;
215 return (bp->tx_ring_size - diff);
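/* Indirect register access: the target offset is written to the PCICFG
* window address register, and the data is then read from (or written
* to) the window data register.
*/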
219 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
221 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
222 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
226 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
228 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
229 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
233 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
236 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
237 REG_WR(bp, BNX2_CTX_DATA, val);
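/* Read a PHY register over the MDIO interface. If the hardware is
* auto-polling the PHY, polling is temporarily turned off so the manual
* access does not collide with it; the access is started by setting
* START_BUSY and then polled until that bit clears.
*/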
241 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
246 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
247 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
248 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
250 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
251 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
256 val1 = (bp->phy_addr << 21) | (reg << 16) |
257 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
258 BNX2_EMAC_MDIO_COMM_START_BUSY;
259 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
261 for (i = 0; i < 50; i++) {
264 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
265 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
268 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
269 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
275 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
284 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
285 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
286 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
288 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
289 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
298 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
303 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
307 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
308 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
314 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
315 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
316 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
318 for (i = 0; i < 50; i++) {
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
333 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
334 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
335 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
337 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
338 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
347 bnx2_disable_int(struct bnx2 *bp)
349 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
350 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
351 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
355 bnx2_enable_int(struct bnx2 *bp)
357 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
358 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
359 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
361 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
362 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
364 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
368 bnx2_disable_int_sync(struct bnx2 *bp)
370 atomic_inc(&bp->intr_sem);
371 bnx2_disable_int(bp);
372 synchronize_irq(bp->pdev->irq);
376 bnx2_netif_stop(struct bnx2 *bp)
378 bnx2_disable_int_sync(bp);
379 if (netif_running(bp->dev)) {
380 netif_poll_disable(bp->dev);
381 netif_tx_disable(bp->dev);
382 bp->dev->trans_start = jiffies; /* prevent tx timeout */
387 bnx2_netif_start(struct bnx2 *bp)
389 if (atomic_dec_and_test(&bp->intr_sem)) {
390 if (netif_running(bp->dev)) {
391 netif_wake_queue(bp->dev);
392 netif_poll_enable(bp->dev);
399 bnx2_free_mem(struct bnx2 *bp)
403 if (bp->status_blk) {
404 pci_free_consistent(bp->pdev, bp->status_stats_size,
405 bp->status_blk, bp->status_blk_mapping);
406 bp->status_blk = NULL;
407 bp->stats_blk = NULL;
409 if (bp->tx_desc_ring) {
410 pci_free_consistent(bp->pdev,
411 sizeof(struct tx_bd) * TX_DESC_CNT,
412 bp->tx_desc_ring, bp->tx_desc_mapping);
413 bp->tx_desc_ring = NULL;
415 kfree(bp->tx_buf_ring);
416 bp->tx_buf_ring = NULL;
417 for (i = 0; i < bp->rx_max_ring; i++) {
418 if (bp->rx_desc_ring[i])
419 pci_free_consistent(bp->pdev,
420 sizeof(struct rx_bd) * RX_DESC_CNT,
422 bp->rx_desc_mapping[i]);
423 bp->rx_desc_ring[i] = NULL;
425 vfree(bp->rx_buf_ring);
426 bp->rx_buf_ring = NULL;
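/* Allocate the TX descriptor ring, the RX descriptor ring(s), the
* software shadow rings that track skbs and DMA mappings, and a single
* DMA block shared by the status and statistics blocks.
*/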
430 bnx2_alloc_mem(struct bnx2 *bp)
432 int i, status_blk_size;
434 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
436 if (bp->tx_buf_ring == NULL)
439 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
440 sizeof(struct tx_bd) *
442 &bp->tx_desc_mapping);
443 if (bp->tx_desc_ring == NULL)
446 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
448 if (bp->rx_buf_ring == NULL)
451 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
454 for (i = 0; i < bp->rx_max_ring; i++) {
455 bp->rx_desc_ring[i] =
456 pci_alloc_consistent(bp->pdev,
457 sizeof(struct rx_bd) * RX_DESC_CNT,
458 &bp->rx_desc_mapping[i]);
459 if (bp->rx_desc_ring[i] == NULL)
464 /* Combine status and statistics blocks into one allocation. */
465 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
466 bp->status_stats_size = status_blk_size +
467 sizeof(struct statistics_block);
469 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
470 &bp->status_blk_mapping);
471 if (bp->status_blk == NULL)
474 memset(bp->status_blk, 0, bp->status_stats_size);
476 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
479 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
489 bnx2_report_fw_link(struct bnx2 *bp)
491 u32 fw_link_status = 0;
496 switch (bp->line_speed) {
498 if (bp->duplex == DUPLEX_HALF)
499 fw_link_status = BNX2_LINK_STATUS_10HALF;
501 fw_link_status = BNX2_LINK_STATUS_10FULL;
504 if (bp->duplex == DUPLEX_HALF)
505 fw_link_status = BNX2_LINK_STATUS_100HALF;
507 fw_link_status = BNX2_LINK_STATUS_100FULL;
510 if (bp->duplex == DUPLEX_HALF)
511 fw_link_status = BNX2_LINK_STATUS_1000HALF;
513 fw_link_status = BNX2_LINK_STATUS_1000FULL;
516 if (bp->duplex == DUPLEX_HALF)
517 fw_link_status = BNX2_LINK_STATUS_2500HALF;
519 fw_link_status = BNX2_LINK_STATUS_2500FULL;
523 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
526 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
528 bnx2_read_phy(bp, MII_BMSR, &bmsr);
529 bnx2_read_phy(bp, MII_BMSR, &bmsr);
531 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
532 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
533 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
535 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
539 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
541 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
545 bnx2_report_link(struct bnx2 *bp)
548 netif_carrier_on(bp->dev);
549 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
551 printk("%d Mbps ", bp->line_speed);
553 if (bp->duplex == DUPLEX_FULL)
554 printk("full duplex");
556 printk("half duplex");
559 if (bp->flow_ctrl & FLOW_CTRL_RX) {
560 printk(", receive ");
561 if (bp->flow_ctrl & FLOW_CTRL_TX)
562 printk("& transmit ");
565 printk(", transmit ");
567 printk("flow control ON");
572 netif_carrier_off(bp->dev);
573 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
576 bnx2_report_fw_link(bp);
580 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
582 u32 local_adv, remote_adv;
585 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
586 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
588 if (bp->duplex == DUPLEX_FULL) {
589 bp->flow_ctrl = bp->req_flow_ctrl;
594 if (bp->duplex != DUPLEX_FULL) {
598 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
599 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
602 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
603 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
604 bp->flow_ctrl |= FLOW_CTRL_TX;
605 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
606 bp->flow_ctrl |= FLOW_CTRL_RX;
610 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
611 bnx2_read_phy(bp, MII_LPA, &remote_adv);
613 if (bp->phy_flags & PHY_SERDES_FLAG) {
614 u32 new_local_adv = 0;
615 u32 new_remote_adv = 0;
617 if (local_adv & ADVERTISE_1000XPAUSE)
618 new_local_adv |= ADVERTISE_PAUSE_CAP;
619 if (local_adv & ADVERTISE_1000XPSE_ASYM)
620 new_local_adv |= ADVERTISE_PAUSE_ASYM;
621 if (remote_adv & ADVERTISE_1000XPAUSE)
622 new_remote_adv |= ADVERTISE_PAUSE_CAP;
623 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
624 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
626 local_adv = new_local_adv;
627 remote_adv = new_remote_adv;
630 /* See Table 28B-3 of 802.3ab-1999 spec. */
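/* In short: if both sides advertise symmetric pause (PAUSE_CAP), pause
* is enabled in both directions; when one side advertises only
* asymmetric pause, pause ends up enabled in a single direction, as
* the checks below implement.
*/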
631 if (local_adv & ADVERTISE_PAUSE_CAP) {
if (local_adv & ADVERTISE_PAUSE_ASYM) {
633 if (remote_adv & ADVERTISE_PAUSE_CAP) {
634 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
636 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
637 bp->flow_ctrl = FLOW_CTRL_RX;
641 if (remote_adv & ADVERTISE_PAUSE_CAP) {
642 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
646 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
647 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
648 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
650 bp->flow_ctrl = FLOW_CTRL_TX;
656 bnx2_5708s_linkup(struct bnx2 *bp)
661 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
662 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
663 case BCM5708S_1000X_STAT1_SPEED_10:
664 bp->line_speed = SPEED_10;
666 case BCM5708S_1000X_STAT1_SPEED_100:
667 bp->line_speed = SPEED_100;
669 case BCM5708S_1000X_STAT1_SPEED_1G:
670 bp->line_speed = SPEED_1000;
672 case BCM5708S_1000X_STAT1_SPEED_2G5:
673 bp->line_speed = SPEED_2500;
676 if (val & BCM5708S_1000X_STAT1_FD)
677 bp->duplex = DUPLEX_FULL;
679 bp->duplex = DUPLEX_HALF;
685 bnx2_5706s_linkup(struct bnx2 *bp)
687 u32 bmcr, local_adv, remote_adv, common;
690 bp->line_speed = SPEED_1000;
692 bnx2_read_phy(bp, MII_BMCR, &bmcr);
693 if (bmcr & BMCR_FULLDPLX) {
694 bp->duplex = DUPLEX_FULL;
697 bp->duplex = DUPLEX_HALF;
700 if (!(bmcr & BMCR_ANENABLE)) {
704 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
705 bnx2_read_phy(bp, MII_LPA, &remote_adv);
707 common = local_adv & remote_adv;
708 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
710 if (common & ADVERTISE_1000XFULL) {
711 bp->duplex = DUPLEX_FULL;
714 bp->duplex = DUPLEX_HALF;
722 bnx2_copper_linkup(struct bnx2 *bp)
726 bnx2_read_phy(bp, MII_BMCR, &bmcr);
727 if (bmcr & BMCR_ANENABLE) {
728 u32 local_adv, remote_adv, common;
730 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
731 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
733 common = local_adv & (remote_adv >> 2);
734 if (common & ADVERTISE_1000FULL) {
735 bp->line_speed = SPEED_1000;
736 bp->duplex = DUPLEX_FULL;
738 else if (common & ADVERTISE_1000HALF) {
739 bp->line_speed = SPEED_1000;
740 bp->duplex = DUPLEX_HALF;
743 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
744 bnx2_read_phy(bp, MII_LPA, &remote_adv);
746 common = local_adv & remote_adv;
747 if (common & ADVERTISE_100FULL) {
748 bp->line_speed = SPEED_100;
749 bp->duplex = DUPLEX_FULL;
751 else if (common & ADVERTISE_100HALF) {
752 bp->line_speed = SPEED_100;
753 bp->duplex = DUPLEX_HALF;
755 else if (common & ADVERTISE_10FULL) {
756 bp->line_speed = SPEED_10;
757 bp->duplex = DUPLEX_FULL;
759 else if (common & ADVERTISE_10HALF) {
760 bp->line_speed = SPEED_10;
761 bp->duplex = DUPLEX_HALF;
770 if (bmcr & BMCR_SPEED100) {
771 bp->line_speed = SPEED_100;
774 bp->line_speed = SPEED_10;
776 if (bmcr & BMCR_FULLDPLX) {
777 bp->duplex = DUPLEX_FULL;
780 bp->duplex = DUPLEX_HALF;
788 bnx2_set_mac_link(struct bnx2 *bp)
792 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
793 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
794 (bp->duplex == DUPLEX_HALF)) {
795 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
798 /* Configure the EMAC mode register. */
799 val = REG_RD(bp, BNX2_EMAC_MODE);
801 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
802 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
806 switch (bp->line_speed) {
808 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
809 val |= BNX2_EMAC_MODE_PORT_MII_10;
814 val |= BNX2_EMAC_MODE_PORT_MII;
817 val |= BNX2_EMAC_MODE_25G;
820 val |= BNX2_EMAC_MODE_PORT_GMII;
825 val |= BNX2_EMAC_MODE_PORT_GMII;
828 /* Set the MAC to operate in the appropriate duplex mode. */
829 if (bp->duplex == DUPLEX_HALF)
830 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
831 REG_WR(bp, BNX2_EMAC_MODE, val);
833 /* Enable/disable rx PAUSE. */
834 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
836 if (bp->flow_ctrl & FLOW_CTRL_RX)
837 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
838 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
840 /* Enable/disable tx PAUSE. */
841 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
842 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
844 if (bp->flow_ctrl & FLOW_CTRL_TX)
845 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
846 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
848 /* Acknowledge the interrupt. */
849 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
855 bnx2_set_link(struct bnx2 *bp)
860 if (bp->loopback == MAC_LOOPBACK) {
865 link_up = bp->link_up;
867 bnx2_read_phy(bp, MII_BMSR, &bmsr);
868 bnx2_read_phy(bp, MII_BMSR, &bmsr);
870 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
871 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
874 val = REG_RD(bp, BNX2_EMAC_STATUS);
875 if (val & BNX2_EMAC_STATUS_LINK)
876 bmsr |= BMSR_LSTATUS;
878 bmsr &= ~BMSR_LSTATUS;
881 if (bmsr & BMSR_LSTATUS) {
884 if (bp->phy_flags & PHY_SERDES_FLAG) {
885 if (CHIP_NUM(bp) == CHIP_NUM_5706)
886 bnx2_5706s_linkup(bp);
887 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
888 bnx2_5708s_linkup(bp);
891 bnx2_copper_linkup(bp);
893 bnx2_resolve_flow_ctrl(bp);
896 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
897 (bp->autoneg & AUTONEG_SPEED)) {
901 bnx2_read_phy(bp, MII_BMCR, &bmcr);
902 if (!(bmcr & BMCR_ANENABLE)) {
903 bnx2_write_phy(bp, MII_BMCR, bmcr |
907 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
911 if (bp->link_up != link_up) {
912 bnx2_report_link(bp);
915 bnx2_set_mac_link(bp);
921 bnx2_reset_phy(struct bnx2 *bp)
926 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
928 #define PHY_RESET_MAX_WAIT 100
929 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
bnx2_read_phy(bp, MII_BMCR, &reg);
933 if (!(reg & BMCR_RESET)) {
938 if (i == PHY_RESET_MAX_WAIT) {
945 bnx2_phy_get_pause_adv(struct bnx2 *bp)
949 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
950 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
952 if (bp->phy_flags & PHY_SERDES_FLAG) {
953 adv = ADVERTISE_1000XPAUSE;
956 adv = ADVERTISE_PAUSE_CAP;
959 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
960 if (bp->phy_flags & PHY_SERDES_FLAG) {
961 adv = ADVERTISE_1000XPSE_ASYM;
964 adv = ADVERTISE_PAUSE_ASYM;
967 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
968 if (bp->phy_flags & PHY_SERDES_FLAG) {
969 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
972 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
979 bnx2_setup_serdes_phy(struct bnx2 *bp)
984 if (!(bp->autoneg & AUTONEG_SPEED)) {
986 int force_link_down = 0;
988 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
989 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
990 if (up1 & BCM5708S_UP1_2G5) {
991 up1 &= ~BCM5708S_UP1_2G5;
992 bnx2_write_phy(bp, BCM5708S_UP1, up1);
997 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
998 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1000 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1001 new_bmcr = bmcr & ~BMCR_ANENABLE;
1002 new_bmcr |= BMCR_SPEED1000;
1003 if (bp->req_duplex == DUPLEX_FULL) {
1004 adv |= ADVERTISE_1000XFULL;
1005 new_bmcr |= BMCR_FULLDPLX;
1008 adv |= ADVERTISE_1000XHALF;
1009 new_bmcr &= ~BMCR_FULLDPLX;
1011 if ((new_bmcr != bmcr) || (force_link_down)) {
1012 /* Force a link down visible on the other side */
1014 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1015 ~(ADVERTISE_1000XFULL |
1016 ADVERTISE_1000XHALF));
1017 bnx2_write_phy(bp, MII_BMCR, bmcr |
1018 BMCR_ANRESTART | BMCR_ANENABLE);
1021 netif_carrier_off(bp->dev);
1022 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1024 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1025 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1030 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1031 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1032 up1 |= BCM5708S_UP1_2G5;
1033 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1036 if (bp->advertising & ADVERTISED_1000baseT_Full)
1037 new_adv |= ADVERTISE_1000XFULL;
1039 new_adv |= bnx2_phy_get_pause_adv(bp);
1041 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1042 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1044 bp->serdes_an_pending = 0;
1045 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1046 /* Force a link down visible on the other side */
1050 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1051 for (i = 0; i < 110; i++) {
1056 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1057 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1059 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1060 /* Speed up link-up time when the link partner
1061 * does not autonegotiate which is very common
1062 * in blade servers. Some blade servers use
* IPMI for keyboard input and it's important
* to minimize link disruptions. Autoneg. involves
* exchanging base pages plus 3 next pages and
* normally completes in about 120 msec.
*/
1068 bp->current_interval = SERDES_AN_TIMEOUT;
1069 bp->serdes_an_pending = 1;
1070 mod_timer(&bp->timer, jiffies + bp->current_interval);
1077 #define ETHTOOL_ALL_FIBRE_SPEED \
1078 (ADVERTISED_1000baseT_Full)
1080 #define ETHTOOL_ALL_COPPER_SPEED \
1081 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1082 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1083 ADVERTISED_1000baseT_Full)
1085 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1086 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1088 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1091 bnx2_setup_copper_phy(struct bnx2 *bp)
1096 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1098 if (bp->autoneg & AUTONEG_SPEED) {
1099 u32 adv_reg, adv1000_reg;
1100 u32 new_adv_reg = 0;
1101 u32 new_adv1000_reg = 0;
1103 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1104 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1105 ADVERTISE_PAUSE_ASYM);
1107 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1108 adv1000_reg &= PHY_ALL_1000_SPEED;
1110 if (bp->advertising & ADVERTISED_10baseT_Half)
1111 new_adv_reg |= ADVERTISE_10HALF;
1112 if (bp->advertising & ADVERTISED_10baseT_Full)
1113 new_adv_reg |= ADVERTISE_10FULL;
1114 if (bp->advertising & ADVERTISED_100baseT_Half)
1115 new_adv_reg |= ADVERTISE_100HALF;
1116 if (bp->advertising & ADVERTISED_100baseT_Full)
1117 new_adv_reg |= ADVERTISE_100FULL;
1118 if (bp->advertising & ADVERTISED_1000baseT_Full)
1119 new_adv1000_reg |= ADVERTISE_1000FULL;
1121 new_adv_reg |= ADVERTISE_CSMA;
1123 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1125 if ((adv1000_reg != new_adv1000_reg) ||
1126 (adv_reg != new_adv_reg) ||
1127 ((bmcr & BMCR_ANENABLE) == 0)) {
1129 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1130 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1131 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1134 else if (bp->link_up) {
1135 /* Flow ctrl may have changed from auto to forced */
1136 /* or vice-versa. */
1138 bnx2_resolve_flow_ctrl(bp);
1139 bnx2_set_mac_link(bp);
1145 if (bp->req_line_speed == SPEED_100) {
1146 new_bmcr |= BMCR_SPEED100;
1148 if (bp->req_duplex == DUPLEX_FULL) {
1149 new_bmcr |= BMCR_FULLDPLX;
1151 if (new_bmcr != bmcr) {
1155 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1156 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1158 if (bmsr & BMSR_LSTATUS) {
1159 /* Force link down */
1160 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1163 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1164 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1166 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1169 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
/* Normally, the new speed is set up after the link has
* gone down and come back up. In some cases, the link will not go
* down, so we need to set up the new speed here.
*/
1175 if (bmsr & BMSR_LSTATUS) {
1176 bp->line_speed = bp->req_line_speed;
1177 bp->duplex = bp->req_duplex;
1178 bnx2_resolve_flow_ctrl(bp);
1179 bnx2_set_mac_link(bp);
1186 bnx2_setup_phy(struct bnx2 *bp)
1188 if (bp->loopback == MAC_LOOPBACK)
1191 if (bp->phy_flags & PHY_SERDES_FLAG) {
1192 return (bnx2_setup_serdes_phy(bp));
1195 return (bnx2_setup_copper_phy(bp));
1200 bnx2_init_5708s_phy(struct bnx2 *bp)
1204 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1205 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1206 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1208 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1209 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1210 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1212 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1213 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1214 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1216 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1217 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1218 val |= BCM5708S_UP1_2G5;
1219 bnx2_write_phy(bp, BCM5708S_UP1, val);
1222 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1223 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1224 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1225 /* increase tx signal amplitude */
1226 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1227 BCM5708S_BLK_ADDR_TX_MISC);
1228 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1229 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1230 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1231 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1234 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1235 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1240 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1241 BNX2_SHARED_HW_CFG_CONFIG);
1242 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1243 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1244 BCM5708S_BLK_ADDR_TX_MISC);
1245 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1246 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1247 BCM5708S_BLK_ADDR_DIG);
1254 bnx2_init_5706s_phy(struct bnx2 *bp)
1256 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1258 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1259 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1262 if (bp->dev->mtu > 1500) {
1265 /* Set extended packet length bit */
1266 bnx2_write_phy(bp, 0x18, 0x7);
1267 bnx2_read_phy(bp, 0x18, &val);
1268 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1270 bnx2_write_phy(bp, 0x1c, 0x6c00);
1271 bnx2_read_phy(bp, 0x1c, &val);
1272 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1277 bnx2_write_phy(bp, 0x18, 0x7);
1278 bnx2_read_phy(bp, 0x18, &val);
1279 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1281 bnx2_write_phy(bp, 0x1c, 0x6c00);
1282 bnx2_read_phy(bp, 0x1c, &val);
1283 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1290 bnx2_init_copper_phy(struct bnx2 *bp)
1294 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1296 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1297 bnx2_write_phy(bp, 0x18, 0x0c00);
1298 bnx2_write_phy(bp, 0x17, 0x000a);
1299 bnx2_write_phy(bp, 0x15, 0x310b);
1300 bnx2_write_phy(bp, 0x17, 0x201f);
1301 bnx2_write_phy(bp, 0x15, 0x9506);
1302 bnx2_write_phy(bp, 0x17, 0x401f);
1303 bnx2_write_phy(bp, 0x15, 0x14e2);
1304 bnx2_write_phy(bp, 0x18, 0x0400);
1307 if (bp->dev->mtu > 1500) {
1308 /* Set extended packet length bit */
1309 bnx2_write_phy(bp, 0x18, 0x7);
1310 bnx2_read_phy(bp, 0x18, &val);
1311 bnx2_write_phy(bp, 0x18, val | 0x4000);
1313 bnx2_read_phy(bp, 0x10, &val);
1314 bnx2_write_phy(bp, 0x10, val | 0x1);
1317 bnx2_write_phy(bp, 0x18, 0x7);
1318 bnx2_read_phy(bp, 0x18, &val);
1319 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1321 bnx2_read_phy(bp, 0x10, &val);
1322 bnx2_write_phy(bp, 0x10, val & ~0x1);
1325 /* ethernet@wirespeed */
1326 bnx2_write_phy(bp, 0x18, 0x7007);
1327 bnx2_read_phy(bp, 0x18, &val);
1328 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1334 bnx2_init_phy(struct bnx2 *bp)
1339 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1340 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1342 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1346 bnx2_read_phy(bp, MII_PHYSID1, &val);
1347 bp->phy_id = val << 16;
1348 bnx2_read_phy(bp, MII_PHYSID2, &val);
1349 bp->phy_id |= val & 0xffff;
1351 if (bp->phy_flags & PHY_SERDES_FLAG) {
1352 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1353 rc = bnx2_init_5706s_phy(bp);
1354 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1355 rc = bnx2_init_5708s_phy(bp);
1358 rc = bnx2_init_copper_phy(bp);
1367 bnx2_set_mac_loopback(struct bnx2 *bp)
1371 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1372 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1373 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1374 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1379 static int bnx2_test_link(struct bnx2 *);
1382 bnx2_set_phy_loopback(struct bnx2 *bp)
1387 spin_lock_bh(&bp->phy_lock);
1388 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1390 spin_unlock_bh(&bp->phy_lock);
1394 for (i = 0; i < 10; i++) {
1395 if (bnx2_test_link(bp) == 0)
1400 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1401 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1402 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1403 BNX2_EMAC_MODE_25G);
1405 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1406 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
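/* Driver/firmware mailbox handshake: write the message (tagged with a
* sequence number) to the driver mailbox, then poll the firmware
* mailbox for an acknowledgement carrying the same sequence number.
* On timeout, the firmware is informed with a FW_TIMEOUT code.
*/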
1412 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1418 msg_data |= bp->fw_wr_seq;
1420 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1422 /* wait for an acknowledgement. */
1423 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1426 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1428 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1431 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1434 /* If we timed out, inform the firmware that this is the case. */
1435 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1437 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1440 msg_data &= ~BNX2_DRV_MSG_CODE;
1441 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1443 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1448 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1455 bnx2_init_context(struct bnx2 *bp)
1461 u32 vcid_addr, pcid_addr, offset;
1465 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1468 vcid_addr = GET_PCID_ADDR(vcid);
1470 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1475 pcid_addr = GET_PCID_ADDR(new_vcid);
1478 vcid_addr = GET_CID_ADDR(vcid);
1479 pcid_addr = vcid_addr;
1482 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1483 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1485 /* Zero out the context. */
1486 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1487 CTX_WR(bp, 0x00, offset, 0);
1490 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1491 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
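/* Work around bad RX buffer memory: repeatedly ask the chip to allocate
* mbufs, remember the good ones (bit 9 clear), and then free only the
* good ones back to the pool so the bad blocks stay out of circulation.
*/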
1496 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1502 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1503 if (good_mbuf == NULL) {
1504 printk(KERN_ERR PFX "Failed to allocate memory in "
1505 "bnx2_alloc_bad_rbuf\n");
1509 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1510 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1514 /* Allocate a bunch of mbufs and save the good ones in an array. */
1515 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1516 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1517 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1519 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1521 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1523 /* The addresses with Bit 9 set are bad memory blocks. */
1524 if (!(val & (1 << 9))) {
1525 good_mbuf[good_mbuf_cnt] = (u16) val;
1529 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1532 /* Free the good ones back to the mbuf pool thus discarding
1533 * all the bad ones. */
1534 while (good_mbuf_cnt) {
1537 val = good_mbuf[good_mbuf_cnt];
1538 val = (val << 9) | val | 1;
1540 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1547 bnx2_set_mac_addr(struct bnx2 *bp)
1550 u8 *mac_addr = bp->dev->dev_addr;
1552 val = (mac_addr[0] << 8) | mac_addr[1];
1554 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1556 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1557 (mac_addr[4] << 8) | mac_addr[5];
1559 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
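/* Allocate and DMA-map a receive skb for the given ring index, align
* its data to an 8-byte boundary, and publish the mapping in the rx_bd.
*/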
1563 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1565 struct sk_buff *skb;
1566 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1568 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1569 unsigned long align;
1571 skb = dev_alloc_skb(bp->rx_buf_size);
1576 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1577 skb_reserve(skb, 8 - align);
1581 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1582 PCI_DMA_FROMDEVICE);
1585 pci_unmap_addr_set(rx_buf, mapping, mapping);
1587 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1588 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1590 bp->rx_prod_bseq += bp->rx_buf_use_size;
1596 bnx2_phy_int(struct bnx2 *bp)
1598 u32 new_link_state, old_link_state;
1600 new_link_state = bp->status_blk->status_attn_bits &
1601 STATUS_ATTN_BITS_LINK_STATE;
1602 old_link_state = bp->status_blk->status_attn_bits_ack &
1603 STATUS_ATTN_BITS_LINK_STATE;
1604 if (new_link_state != old_link_state) {
1605 if (new_link_state) {
1606 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1607 STATUS_ATTN_BITS_LINK_STATE);
1610 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1611 STATUS_ATTN_BITS_LINK_STATE);
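/* TX completion: walk the ring from the software consumer index to the
* hardware consumer index reported in the status block, unmap and free
* each completed skb, and wake the queue once enough descriptors are
* available again.
*/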
1618 bnx2_tx_int(struct bnx2 *bp)
1620 struct status_block *sblk = bp->status_blk;
1621 u16 hw_cons, sw_cons, sw_ring_cons;
1624 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1625 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1628 sw_cons = bp->tx_cons;
1630 while (sw_cons != hw_cons) {
1631 struct sw_bd *tx_buf;
1632 struct sk_buff *skb;
1635 sw_ring_cons = TX_RING_IDX(sw_cons);
1637 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1640 /* partial BD completions possible with TSO packets */
1641 if (skb_shinfo(skb)->tso_size) {
1642 u16 last_idx, last_ring_idx;
1644 last_idx = sw_cons +
1645 skb_shinfo(skb)->nr_frags + 1;
1646 last_ring_idx = sw_ring_cons +
1647 skb_shinfo(skb)->nr_frags + 1;
1648 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1651 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1656 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1657 skb_headlen(skb), PCI_DMA_TODEVICE);
1660 last = skb_shinfo(skb)->nr_frags;
1662 for (i = 0; i < last; i++) {
1663 sw_cons = NEXT_TX_BD(sw_cons);
1665 pci_unmap_page(bp->pdev,
1667 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1669 skb_shinfo(skb)->frags[i].size,
1673 sw_cons = NEXT_TX_BD(sw_cons);
1675 tx_free_bd += last + 1;
1677 dev_kfree_skb_irq(skb);
1679 hw_cons = bp->hw_tx_cons =
1680 sblk->status_tx_quick_consumer_index0;
1682 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1687 bp->tx_cons = sw_cons;
1689 if (unlikely(netif_queue_stopped(bp->dev))) {
1690 spin_lock(&bp->tx_lock);
1691 if ((netif_queue_stopped(bp->dev)) &&
1692 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1694 netif_wake_queue(bp->dev);
1696 spin_unlock(&bp->tx_lock);
1701 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1704 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1705 struct rx_bd *cons_bd, *prod_bd;
1707 cons_rx_buf = &bp->rx_buf_ring[cons];
1708 prod_rx_buf = &bp->rx_buf_ring[prod];
1710 pci_dma_sync_single_for_device(bp->pdev,
1711 pci_unmap_addr(cons_rx_buf, mapping),
1712 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1714 bp->rx_prod_bseq += bp->rx_buf_use_size;
1716 prod_rx_buf->skb = skb;
1721 pci_unmap_addr_set(prod_rx_buf, mapping,
1722 pci_unmap_addr(cons_rx_buf, mapping));
1724 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1725 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1726 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1727 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
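/* RX completion: for each completed buffer, drop frames with hardware
* reported errors, copy small packets into a fresh skb (recycling the
* original buffer), set the checksum and VLAN offload results, and hand
* the packet to the stack. Returns the number of packets processed.
*/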
1731 bnx2_rx_int(struct bnx2 *bp, int budget)
1733 struct status_block *sblk = bp->status_blk;
1734 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1735 struct l2_fhdr *rx_hdr;
1738 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1739 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1742 sw_cons = bp->rx_cons;
1743 sw_prod = bp->rx_prod;
1745 /* Memory barrier necessary as speculative reads of the rx
* buffer can be ahead of the index in the status block.
*/
rmb();
1749 while (sw_cons != hw_cons) {
1752 struct sw_bd *rx_buf;
1753 struct sk_buff *skb;
1754 dma_addr_t dma_addr;
1756 sw_ring_cons = RX_RING_IDX(sw_cons);
1757 sw_ring_prod = RX_RING_IDX(sw_prod);
1759 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1764 dma_addr = pci_unmap_addr(rx_buf, mapping);
1766 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1767 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1769 rx_hdr = (struct l2_fhdr *) skb->data;
1770 len = rx_hdr->l2_fhdr_pkt_len - 4;
1772 if ((status = rx_hdr->l2_fhdr_status) &
1773 (L2_FHDR_ERRORS_BAD_CRC |
1774 L2_FHDR_ERRORS_PHY_DECODE |
1775 L2_FHDR_ERRORS_ALIGNMENT |
1776 L2_FHDR_ERRORS_TOO_SHORT |
1777 L2_FHDR_ERRORS_GIANT_FRAME)) {
/* Since we don't have a jumbo ring, copy small packets
* if mtu > 1500.
*/
1785 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1786 struct sk_buff *new_skb;
1788 new_skb = dev_alloc_skb(len + 2);
1789 if (new_skb == NULL)
1793 memcpy(new_skb->data,
1794 skb->data + bp->rx_offset - 2,
1797 skb_reserve(new_skb, 2);
1798 skb_put(new_skb, len);
1799 new_skb->dev = bp->dev;
1801 bnx2_reuse_rx_skb(bp, skb,
1802 sw_ring_cons, sw_ring_prod);
1806 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1807 pci_unmap_single(bp->pdev, dma_addr,
1808 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1810 skb_reserve(skb, bp->rx_offset);
1815 bnx2_reuse_rx_skb(bp, skb,
1816 sw_ring_cons, sw_ring_prod);
1820 skb->protocol = eth_type_trans(skb, bp->dev);
1822 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1823 (htons(skb->protocol) != 0x8100)) {
1825 dev_kfree_skb_irq(skb);
1830 skb->ip_summed = CHECKSUM_NONE;
1832 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1833 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1835 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1836 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1837 skb->ip_summed = CHECKSUM_UNNECESSARY;
1841 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1842 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1843 rx_hdr->l2_fhdr_vlan_tag);
1847 netif_receive_skb(skb);
1849 bp->dev->last_rx = jiffies;
1853 sw_cons = NEXT_RX_BD(sw_cons);
1854 sw_prod = NEXT_RX_BD(sw_prod);
1856 if ((rx_pkt == budget))
1859 /* Refresh hw_cons to see if there is new work */
1860 if (sw_cons == hw_cons) {
1861 hw_cons = bp->hw_rx_cons =
1862 sblk->status_rx_quick_consumer_index0;
1863 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1868 bp->rx_cons = sw_cons;
1869 bp->rx_prod = sw_prod;
1871 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1873 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1881 /* MSI ISR - The only difference between this and the INTx ISR
* is that the MSI interrupt is always serviced.
*/
1885 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1887 struct net_device *dev = dev_instance;
1888 struct bnx2 *bp = netdev_priv(dev);
1890 prefetch(bp->status_blk);
1891 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1892 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1893 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1895 /* Return here if interrupt is disabled. */
1896 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1899 netif_rx_schedule(dev);
1905 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1907 struct net_device *dev = dev_instance;
1908 struct bnx2 *bp = netdev_priv(dev);
1910 /* When using INTx, it is possible for the interrupt to arrive
1911 * at the CPU before the status block posted prior to the
1912 * interrupt. Reading a register will flush the status block.
1913 * When using MSI, the MSI message will always complete after
* the status block write.
*/
1916 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1917 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1918 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1921 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1922 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1923 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1925 /* Return here if interrupt is shared and is disabled. */
1926 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1929 netif_rx_schedule(dev);
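/* Return nonzero if the status block indicates new RX or TX completions
* or a pending link-state change.
*/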
1935 bnx2_has_work(struct bnx2 *bp)
1937 struct status_block *sblk = bp->status_blk;
1939 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1940 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1943 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1951 bnx2_poll(struct net_device *dev, int *budget)
1953 struct bnx2 *bp = netdev_priv(dev);
1955 if ((bp->status_blk->status_attn_bits &
1956 STATUS_ATTN_BITS_LINK_STATE) !=
1957 (bp->status_blk->status_attn_bits_ack &
1958 STATUS_ATTN_BITS_LINK_STATE)) {
1960 spin_lock(&bp->phy_lock);
1962 spin_unlock(&bp->phy_lock);
1964 /* This is needed to take care of transient status
* during link changes.
*/
1967 REG_WR(bp, BNX2_HC_COMMAND,
1968 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1969 REG_RD(bp, BNX2_HC_COMMAND);
1972 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1975 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1976 int orig_budget = *budget;
1979 if (orig_budget > dev->quota)
1980 orig_budget = dev->quota;
1982 work_done = bnx2_rx_int(bp, orig_budget);
1983 *budget -= work_done;
1984 dev->quota -= work_done;
1987 bp->last_status_idx = bp->status_blk->status_idx;
1990 if (!bnx2_has_work(bp)) {
1991 netif_rx_complete(dev);
1992 if (likely(bp->flags & USING_MSI_FLAG)) {
1993 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1994 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1995 bp->last_status_idx);
1998 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1999 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2000 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2001 bp->last_status_idx);
2003 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2004 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2005 bp->last_status_idx);
2012 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
* from set_multicast.
*/
2016 bnx2_set_rx_mode(struct net_device *dev)
2018 struct bnx2 *bp = netdev_priv(dev);
2019 u32 rx_mode, sort_mode;
2022 spin_lock_bh(&bp->phy_lock);
2024 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2025 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2026 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
if (!(bp->flags & ASF_ENABLE_FLAG))
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
2034 if (dev->flags & IFF_PROMISC) {
2035 /* Promiscuous mode. */
2036 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2037 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
2039 else if (dev->flags & IFF_ALLMULTI) {
2040 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2041 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2044 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2047 /* Accept one or more multicast(s). */
2048 struct dev_mc_list *mclist;
2049 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2054 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2056 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2057 i++, mclist = mclist->next) {
crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
bit = crc & 0xff;
regidx = (bit & 0xe0) >> 5;
bit &= 0x1f;
mc_filter[regidx] |= (1 << bit);
2066 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2067 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2071 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2074 if (rx_mode != bp->rx_mode) {
2075 bp->rx_mode = rx_mode;
2076 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2079 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2080 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2081 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2083 spin_unlock_bh(&bp->phy_lock);
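/* Load RV2P firmware: each 64-bit instruction is written as a high/low
* word pair and committed to the selected processor (PROC1 or PROC2)
* through its address/command register; the processor is then reset
* and un-stalled later.
*/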
2087 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2094 for (i = 0; i < rv2p_code_len; i += 8) {
2095 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2097 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2100 if (rv2p_proc == RV2P_PROC1) {
2101 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2102 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2105 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2106 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2110 /* Reset the processor, un-stall is done later. */
2111 if (rv2p_proc == RV2P_PROC1) {
2112 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2115 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
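/* Load firmware into one of the on-chip CPUs: halt the CPU, copy the
* text, data, sbss, bss and read-only sections into its scratchpad
* memory, point the program counter at the start address, and finally
* clear the halt bit to start execution.
*/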
2120 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2126 val = REG_RD_IND(bp, cpu_reg->mode);
2127 val |= cpu_reg->mode_value_halt;
2128 REG_WR_IND(bp, cpu_reg->mode, val);
2129 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2131 /* Load the Text area. */
2132 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2136 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2137 REG_WR_IND(bp, offset, fw->text[j]);
2141 /* Load the Data area. */
2142 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2146 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2147 REG_WR_IND(bp, offset, fw->data[j]);
2151 /* Load the SBSS area. */
2152 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2156 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2157 REG_WR_IND(bp, offset, fw->sbss[j]);
2161 /* Load the BSS area. */
2162 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2166 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2167 REG_WR_IND(bp, offset, fw->bss[j]);
2171 /* Load the Read-Only area. */
2172 offset = cpu_reg->spad_base +
2173 (fw->rodata_addr - cpu_reg->mips_view_base);
2177 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2178 REG_WR_IND(bp, offset, fw->rodata[j]);
2182 /* Clear the pre-fetch instruction. */
2183 REG_WR_IND(bp, cpu_reg->inst, 0);
2184 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2186 /* Start the CPU. */
2187 val = REG_RD_IND(bp, cpu_reg->mode);
2188 val &= ~cpu_reg->mode_value_halt;
2189 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2190 REG_WR_IND(bp, cpu_reg->mode, val);
2194 bnx2_init_cpus(struct bnx2 *bp)
2196 struct cpu_reg cpu_reg;
2199 /* Initialize the RV2P processor. */
2200 load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
2201 load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
2203 /* Initialize the RX Processor. */
2204 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2205 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2206 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2207 cpu_reg.state = BNX2_RXP_CPU_STATE;
2208 cpu_reg.state_value_clear = 0xffffff;
2209 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2210 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2211 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2212 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2213 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2214 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2215 cpu_reg.mips_view_base = 0x8000000;
2217 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2218 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2219 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2220 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2222 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2223 fw.text_len = bnx2_RXP_b06FwTextLen;
2225 fw.text = bnx2_RXP_b06FwText;
2227 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2228 fw.data_len = bnx2_RXP_b06FwDataLen;
2230 fw.data = bnx2_RXP_b06FwData;
2232 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2233 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2235 fw.sbss = bnx2_RXP_b06FwSbss;
2237 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2238 fw.bss_len = bnx2_RXP_b06FwBssLen;
2240 fw.bss = bnx2_RXP_b06FwBss;
2242 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2243 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2244 fw.rodata_index = 0;
2245 fw.rodata = bnx2_RXP_b06FwRodata;
2247 load_cpu_fw(bp, &cpu_reg, &fw);
2249 /* Initialize the TX Processor. */
2250 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2251 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2252 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2253 cpu_reg.state = BNX2_TXP_CPU_STATE;
2254 cpu_reg.state_value_clear = 0xffffff;
2255 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2256 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2257 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2258 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2259 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2260 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2261 cpu_reg.mips_view_base = 0x8000000;
2263 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2264 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2265 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2266 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2268 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2269 fw.text_len = bnx2_TXP_b06FwTextLen;
2271 fw.text = bnx2_TXP_b06FwText;
2273 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2274 fw.data_len = bnx2_TXP_b06FwDataLen;
2276 fw.data = bnx2_TXP_b06FwData;
2278 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2279 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2281 fw.sbss = bnx2_TXP_b06FwSbss;
2283 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2284 fw.bss_len = bnx2_TXP_b06FwBssLen;
2286 fw.bss = bnx2_TXP_b06FwBss;
2288 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2289 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2290 fw.rodata_index = 0;
2291 fw.rodata = bnx2_TXP_b06FwRodata;
2293 load_cpu_fw(bp, &cpu_reg, &fw);
2295 /* Initialize the TX Patch-up Processor. */
2296 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2297 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2298 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2299 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2300 cpu_reg.state_value_clear = 0xffffff;
2301 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2302 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2303 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2304 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2305 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2306 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2307 cpu_reg.mips_view_base = 0x8000000;
2309 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2310 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2311 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2312 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2314 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2315 fw.text_len = bnx2_TPAT_b06FwTextLen;
2317 fw.text = bnx2_TPAT_b06FwText;
2319 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2320 fw.data_len = bnx2_TPAT_b06FwDataLen;
2322 fw.data = bnx2_TPAT_b06FwData;
2324 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2325 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2327 fw.sbss = bnx2_TPAT_b06FwSbss;
2329 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2330 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2332 fw.bss = bnx2_TPAT_b06FwBss;
2334 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2335 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2336 fw.rodata_index = 0;
2337 fw.rodata = bnx2_TPAT_b06FwRodata;
2339 load_cpu_fw(bp, &cpu_reg, &fw);
2341 /* Initialize the Completion Processor. */
2342 cpu_reg.mode = BNX2_COM_CPU_MODE;
2343 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2344 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2345 cpu_reg.state = BNX2_COM_CPU_STATE;
2346 cpu_reg.state_value_clear = 0xffffff;
2347 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2348 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2349 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2350 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2351 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2352 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2353 cpu_reg.mips_view_base = 0x8000000;
2355 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2356 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2357 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2358 fw.start_addr = bnx2_COM_b06FwStartAddr;
2360 fw.text_addr = bnx2_COM_b06FwTextAddr;
2361 fw.text_len = bnx2_COM_b06FwTextLen;
2363 fw.text = bnx2_COM_b06FwText;
2365 fw.data_addr = bnx2_COM_b06FwDataAddr;
2366 fw.data_len = bnx2_COM_b06FwDataLen;
2368 fw.data = bnx2_COM_b06FwData;
2370 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2371 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2373 fw.sbss = bnx2_COM_b06FwSbss;
2375 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2376 fw.bss_len = bnx2_COM_b06FwBssLen;
2378 fw.bss = bnx2_COM_b06FwBss;
2380 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2381 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2382 fw.rodata_index = 0;
2383 fw.rodata = bnx2_COM_b06FwRodata;
2385 load_cpu_fw(bp, &cpu_reg, &fw);
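/* Move the device between power states through the PCI PM control
* register. When entering D3hot with wake-on-LAN enabled, the MAC is
* first configured to receive magic/ACPI wake packets and the firmware
* is told which suspend mode is being entered.
*/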
2390 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2394 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2400 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2401 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2402 PCI_PM_CTRL_PME_STATUS);
2404 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2405 /* delay required during transition out of D3hot */
2408 val = REG_RD(bp, BNX2_EMAC_MODE);
2409 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2410 val &= ~BNX2_EMAC_MODE_MPKT;
2411 REG_WR(bp, BNX2_EMAC_MODE, val);
2413 val = REG_RD(bp, BNX2_RPM_CONFIG);
2414 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2415 REG_WR(bp, BNX2_RPM_CONFIG, val);
2426 autoneg = bp->autoneg;
2427 advertising = bp->advertising;
2429 bp->autoneg = AUTONEG_SPEED;
2430 bp->advertising = ADVERTISED_10baseT_Half |
2431 ADVERTISED_10baseT_Full |
2432 ADVERTISED_100baseT_Half |
2433 ADVERTISED_100baseT_Full |
2436 bnx2_setup_copper_phy(bp);
2438 bp->autoneg = autoneg;
2439 bp->advertising = advertising;
2441 bnx2_set_mac_addr(bp);
2443 val = REG_RD(bp, BNX2_EMAC_MODE);
2445 /* Enable port mode. */
2446 val &= ~BNX2_EMAC_MODE_PORT;
2447 val |= BNX2_EMAC_MODE_PORT_MII |
2448 BNX2_EMAC_MODE_MPKT_RCVD |
2449 BNX2_EMAC_MODE_ACPI_RCVD |
2450 BNX2_EMAC_MODE_MPKT;
2452 REG_WR(bp, BNX2_EMAC_MODE, val);
2454 /* receive all multicast */
2455 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2456 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2459 REG_WR(bp, BNX2_EMAC_RX_MODE,
2460 BNX2_EMAC_RX_MODE_SORT_MODE);
2462 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2463 BNX2_RPM_SORT_USER0_MC_EN;
2464 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2465 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2466 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2467 BNX2_RPM_SORT_USER0_ENA);
2469 /* Need to enable EMAC and RPM for WOL. */
2470 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2471 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2472 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2473 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2475 val = REG_RD(bp, BNX2_RPM_CONFIG);
2476 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2477 REG_WR(bp, BNX2_RPM_CONFIG, val);
2479 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2482 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2485 if (!(bp->flags & NO_WOL_FLAG))
2486 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2488 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2489 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2490 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2499 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2501 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2504 /* No more memory access after this point until
2505 * device is brought back to D0.
2517 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2522 /* Request access to the flash interface. */
2523 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2524 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2525 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2526 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2532 if (j >= NVRAM_TIMEOUT_COUNT)
2539 bnx2_release_nvram_lock(struct bnx2 *bp)
2544 /* Relinquish nvram interface. */
2545 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2547 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2548 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2549 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2555 if (j >= NVRAM_TIMEOUT_COUNT)
2563 bnx2_enable_nvram_write(struct bnx2 *bp)
2567 val = REG_RD(bp, BNX2_MISC_CFG);
2568 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2570 if (!bp->flash_info->buffered) {
2573 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2574 REG_WR(bp, BNX2_NVM_COMMAND,
2575 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2577 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2580 val = REG_RD(bp, BNX2_NVM_COMMAND);
2581 if (val & BNX2_NVM_COMMAND_DONE)
2585 if (j >= NVRAM_TIMEOUT_COUNT)
2592 bnx2_disable_nvram_write(struct bnx2 *bp)
2596 val = REG_RD(bp, BNX2_MISC_CFG);
2597 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2602 bnx2_enable_nvram_access(struct bnx2 *bp)
2606 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2607 /* Enable both bits, even on read. */
2608 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2609 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2613 bnx2_disable_nvram_access(struct bnx2 *bp)
2617 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2618 /* Disable both bits, even after read. */
2619 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2620 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2621 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2625 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2630 if (bp->flash_info->buffered)
2631 /* Buffered flash, no erase needed */
2634 /* Build an erase command */
2635 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2636 BNX2_NVM_COMMAND_DOIT;
2638 /* Need to clear DONE bit separately. */
2639 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2641 /* Address of the NVRAM page to erase. */
2642 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2644 /* Issue an erase command. */
2645 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2647 /* Wait for completion. */
2648 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2653 val = REG_RD(bp, BNX2_NVM_COMMAND);
2654 if (val & BNX2_NVM_COMMAND_DONE)
2658 if (j >= NVRAM_TIMEOUT_COUNT)
2665 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2670 /* Build the command word. */
2671 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2673 /* Translate the offset for a buffered flash part. */
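/* Buffered parts are not addressed linearly: the page number goes in
 * the upper bits (shifted by page_bits) and the byte offset within
 * the page stays in the low bits, so a linear offset must be split
 * and recombined before it is written to BNX2_NVM_ADDR.  For example,
 * assuming a 264-byte page with 9 page-address bits, linear offset
 * 1000 becomes ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208.
 */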
2674 if (bp->flash_info->buffered) {
2675 offset = ((offset / bp->flash_info->page_size) <<
2676 bp->flash_info->page_bits) +
2677 (offset % bp->flash_info->page_size);
2680 /* Need to clear DONE bit separately. */
2681 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2683 /* Address of the NVRAM to read from. */
2684 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2686 /* Issue a read command. */
2687 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2689 /* Wait for completion. */
2690 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2695 val = REG_RD(bp, BNX2_NVM_COMMAND);
2696 if (val & BNX2_NVM_COMMAND_DONE) {
2697 val = REG_RD(bp, BNX2_NVM_READ);
2699 val = be32_to_cpu(val);
2700 memcpy(ret_val, &val, 4);
2704 if (j >= NVRAM_TIMEOUT_COUNT)
2712 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2717 /* Build the command word. */
2718 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2721 /* Translate the offset for a buffered flash part. */
2721 if (bp->flash_info->buffered) {
2722 offset = ((offset / bp->flash_info->page_size) <<
2723 bp->flash_info->page_bits) +
2724 (offset % bp->flash_info->page_size);
2727 /* Need to clear DONE bit separately. */
2728 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2730 memcpy(&val32, val, 4);
2731 val32 = cpu_to_be32(val32);
2733 /* Write the data. */
2734 REG_WR(bp, BNX2_NVM_WRITE, val32);
2736 /* Address of the NVRAM to write to. */
2737 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2739 /* Issue the write command. */
2740 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2742 /* Wait for completion. */
2743 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2746 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2749 if (j >= NVRAM_TIMEOUT_COUNT)
2756 bnx2_init_nvram(struct bnx2 *bp)
2759 int j, entry_count, rc;
2760 struct flash_spec *flash;
2762 /* Determine the selected interface. */
2763 val = REG_RD(bp, BNX2_NVM_CFG1);
2765 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2768 if (val & 0x40000000) {
2770 /* Flash interface has been reconfigured */
2771 for (j = 0, flash = &flash_table[0]; j < entry_count;
2773 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2774 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2775 bp->flash_info = flash;
2782 /* Flash interface has not been reconfigured yet */
2784 if (val & (1 << 23))
2785 mask = FLASH_BACKUP_STRAP_MASK;
2787 mask = FLASH_STRAP_MASK;
2789 for (j = 0, flash = &flash_table[0]; j < entry_count;
2792 if ((val & mask) == (flash->strapping & mask)) {
2793 bp->flash_info = flash;
2795 /* Request access to the flash interface. */
2796 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2799 /* Enable access to flash interface */
2800 bnx2_enable_nvram_access(bp);
2802 /* Reconfigure the flash interface */
2803 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2804 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2805 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2806 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2808 /* Disable access to flash interface */
2809 bnx2_disable_nvram_access(bp);
2810 bnx2_release_nvram_lock(bp);
2815 } /* if (val & 0x40000000) */
2817 if (j == entry_count) {
2818 bp->flash_info = NULL;
2819 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2823 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2824 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2826 bp->flash_size = val;
2828 bp->flash_size = bp->flash_info->total_size;
2834 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2838 u32 cmd_flags, offset32, len32, extra;
2843 /* Request access to the flash interface. */
2844 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2847 /* Enable access to flash interface */
2848 bnx2_enable_nvram_access(bp);
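/* NVRAM is accessed a dword at a time, so the transfer is split into
 * three phases: a leading partial dword (read whole, then copy out
 * pre_len bytes), whole aligned dwords in the middle, and a trailing
 * partial dword handled the same way via the extra byte count.
 */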
2861 pre_len = 4 - (offset & 3);
2863 if (pre_len >= len32) {
2865 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2866 BNX2_NVM_COMMAND_LAST;
2869 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2872 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2877 memcpy(ret_buf, buf + (offset & 3), pre_len);
2884 extra = 4 - (len32 & 3);
2885 len32 = (len32 + 4) & ~3;
2892 cmd_flags = BNX2_NVM_COMMAND_LAST;
2894 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2895 BNX2_NVM_COMMAND_LAST;
2897 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2899 memcpy(ret_buf, buf, 4 - extra);
2901 else if (len32 > 0) {
2904 /* Read the first word. */
2908 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2910 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2912 /* Advance to the next dword. */
2917 while (len32 > 4 && rc == 0) {
2918 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2920 /* Advance to the next dword. */
2929 cmd_flags = BNX2_NVM_COMMAND_LAST;
2930 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2932 memcpy(ret_buf, buf, 4 - extra);
2935 /* Disable access to flash interface */
2936 bnx2_disable_nvram_access(bp);
2938 bnx2_release_nvram_lock(bp);
2944 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2947 u32 written, offset32, len32;
2948 u8 *buf, start[4], end[4];
2950 int align_start, align_end;
2955 align_start = align_end = 0;
2957 if ((align_start = (offset32 & 3))) {
2959 len32 += align_start;
2960 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2965 if ((len32 > 4) || !align_start) {
2966 align_end = 4 - (len32 & 3);
2968 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2975 if (align_start || align_end) {
2976 buf = kmalloc(len32, GFP_KERNEL);
2980 memcpy(buf, start, 4);
2983 memcpy(buf + len32 - 4, end, 4);
2985 memcpy(buf + align_start, data_buf, buf_size);
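/* Write one flash page per loop iteration.  For non-buffered parts
 * the existing page is first read into flash_buffer, the page is
 * erased, and the bytes outside [data_start, data_end) are written
 * back together with the new data; buffered parts skip the read-back
 * and the erase.
 */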
2989 while ((written < len32) && (rc == 0)) {
2990 u32 page_start, page_end, data_start, data_end;
2991 u32 addr, cmd_flags;
2993 u8 flash_buffer[264];
2995 /* Find the page_start addr */
2996 page_start = offset32 + written;
2997 page_start -= (page_start % bp->flash_info->page_size);
2998 /* Find the page_end addr */
2999 page_end = page_start + bp->flash_info->page_size;
3000 /* Find the data_start addr */
3001 data_start = (written == 0) ? offset32 : page_start;
3002 /* Find the data_end addr */
3003 data_end = (page_end > offset32 + len32) ?
3004 (offset32 + len32) : page_end;
3006 /* Request access to the flash interface. */
3007 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3008 goto nvram_write_end;
3010 /* Enable access to flash interface */
3011 bnx2_enable_nvram_access(bp);
3013 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3014 if (bp->flash_info->buffered == 0) {
3017 /* Read the whole page into the buffer
3018 * (non-buffered flash only) */
3019 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3020 if (j == (bp->flash_info->page_size - 4)) {
3021 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3023 rc = bnx2_nvram_read_dword(bp,
3029 goto nvram_write_end;
3035 /* Enable writes to flash interface (unlock write-protect) */
3036 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3037 goto nvram_write_end;
3039 /* Erase the page */
3040 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3041 goto nvram_write_end;
3043 /* Re-enable write access for the actual write */
3044 bnx2_enable_nvram_write(bp);
3046 /* Loop to write back the buffer data from page_start to data_start */
3049 if (bp->flash_info->buffered == 0) {
3050 for (addr = page_start; addr < data_start;
3051 addr += 4, i += 4) {
3053 rc = bnx2_nvram_write_dword(bp, addr,
3054 &flash_buffer[i], cmd_flags);
3057 goto nvram_write_end;
3063 /* Loop to write the new data from data_start to data_end */
3064 for (addr = data_start; addr < data_end; addr += 4, i++) {
3065 if ((addr == page_end - 4) ||
3066 ((bp->flash_info->buffered) &&
3067 (addr == data_end - 4))) {
3069 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3071 rc = bnx2_nvram_write_dword(bp, addr, buf,
3075 goto nvram_write_end;
3081 /* Loop to write back the buffer data from data_end to page_end */
3083 if (bp->flash_info->buffered == 0) {
3084 for (addr = data_end; addr < page_end;
3085 addr += 4, i += 4) {
3087 if (addr == page_end-4) {
3088 cmd_flags = BNX2_NVM_COMMAND_LAST;
3090 rc = bnx2_nvram_write_dword(bp, addr,
3091 &flash_buffer[i], cmd_flags);
3094 goto nvram_write_end;
3100 /* Disable writes to flash interface (lock write-protect) */
3101 bnx2_disable_nvram_write(bp);
3103 /* Disable access to flash interface */
3104 bnx2_disable_nvram_access(bp);
3105 bnx2_release_nvram_lock(bp);
3107 /* Increment written */
3108 written += data_end - data_start;
3112 if (align_start || align_end)
3118 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3123 /* Wait for the current PCI transaction to complete before
3124 * issuing a reset. */
3125 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3126 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3127 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3128 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3129 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3130 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3133 /* Wait for the firmware to tell us it is ok to issue a reset. */
3134 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3136 /* Deposit a driver reset signature so the firmware knows that
3137 * this is a soft reset. */
3138 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3139 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3141 /* Do a dummy read to force the chip to complete all current transactions
3142 * before we issue a reset. */
3143 val = REG_RD(bp, BNX2_MISC_ID);
3145 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3146 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3147 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3150 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
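/* The write above requests the core reset; the poll loop below waits
 * for both the reset-request and reset-busy bits to clear, which
 * indicates the reset has completed.
 */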
3152 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3153 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3156 /* Reset takes approximately 30 usec */
3157 for (i = 0; i < 10; i++) {
3158 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3159 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3160 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3166 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3167 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3168 printk(KERN_ERR PFX "Chip reset did not complete\n");
3172 /* Make sure byte swapping is properly configured. */
3173 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3174 if (val != 0x01020304) {
3175 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3179 /* Wait for the firmware to finish its initialization. */
3180 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3184 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3185 /* Adjust the voltage regulator two steps lower. The default
3186 * value of this register is 0x0000000e. */
3187 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3189 /* Remove bad rbuf memory from the free pool. */
3190 rc = bnx2_alloc_bad_rbuf(bp);
3197 bnx2_init_chip(struct bnx2 *bp)
3202 /* Make sure the interrupt is not active. */
3203 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3205 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3206 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3208 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3210 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3211 DMA_READ_CHANS << 12 |
3212 DMA_WRITE_CHANS << 16;
3214 val |= (0x2 << 20) | (1 << 11);
3216 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3219 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3220 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3221 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3223 REG_WR(bp, BNX2_DMA_CONFIG, val);
3225 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3226 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3227 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3228 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3231 if (bp->flags & PCIX_FLAG) {
3234 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3236 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3237 val16 & ~PCI_X_CMD_ERO);
3240 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3241 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3242 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3243 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3245 /* Initialize context mapping and zero out the quick contexts. The
3246 * context block must have already been enabled. */
3247 bnx2_init_context(bp);
3250 bnx2_init_nvram(bp);
3252 bnx2_set_mac_addr(bp);
3254 val = REG_RD(bp, BNX2_MQ_CONFIG);
3255 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3256 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3257 REG_WR(bp, BNX2_MQ_CONFIG, val);
3259 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3260 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3261 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3263 val = (BCM_PAGE_BITS - 8) << 24;
3264 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3266 /* Configure page size. */
3267 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3268 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3269 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3270 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3272 val = bp->mac_addr[0] +
3273 (bp->mac_addr[1] << 8) +
3274 (bp->mac_addr[2] << 16) +
3276 (bp->mac_addr[4] << 8) +
3277 (bp->mac_addr[5] << 16);
3278 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3280 /* Program the MTU. Also include 4 bytes for CRC32. */
3281 val = bp->dev->mtu + ETH_HLEN + 4;
3282 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3283 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3284 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3286 bp->last_status_idx = 0;
3287 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3289 /* Set up how to generate a link change interrupt. */
3290 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3292 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3293 (u64) bp->status_blk_mapping & 0xffffffff);
3294 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3296 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3297 (u64) bp->stats_blk_mapping & 0xffffffff);
3298 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3299 (u64) bp->stats_blk_mapping >> 32);
3301 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3302 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3304 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3305 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3307 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3308 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3310 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3312 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3314 REG_WR(bp, BNX2_HC_COM_TICKS,
3315 (bp->com_ticks_int << 16) | bp->com_ticks);
3317 REG_WR(bp, BNX2_HC_CMD_TICKS,
3318 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3320 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3321 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3323 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3324 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3326 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3327 BNX2_HC_CONFIG_TX_TMR_MODE |
3328 BNX2_HC_CONFIG_COLLECT_STATS);
3331 /* Clear internal stats counters. */
3332 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3334 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3336 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3337 BNX2_PORT_FEATURE_ASF_ENABLED)
3338 bp->flags |= ASF_ENABLE_FLAG;
3340 /* Initialize the receive filter. */
3341 bnx2_set_rx_mode(bp->dev);
3343 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3346 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3347 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3351 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3358 bnx2_init_tx_ring(struct bnx2 *bp)
3363 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3365 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3366 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3371 bp->tx_prod_bseq = 0;
3373 val = BNX2_L2CTX_TYPE_TYPE_L2;
3374 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3375 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3377 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3379 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3381 val = (u64) bp->tx_desc_mapping >> 32;
3382 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3384 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3385 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3389 bnx2_init_rx_ring(struct bnx2 *bp)
3393 u16 prod, ring_prod;
3396 /* 8 for CRC and VLAN */
3397 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3398 /* 8 for alignment */
3399 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3401 ring_prod = prod = bp->rx_prod = 0;
3404 bp->rx_prod_bseq = 0;
3406 for (i = 0; i < bp->rx_max_ring; i++) {
3409 rxbd = &bp->rx_desc_ring[i][0];
3410 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3411 rxbd->rx_bd_len = bp->rx_buf_use_size;
3412 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
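/* The last BD of each ring page is not a data descriptor; it chains
 * to the first BD of the next page (the final page chains back to
 * page 0), so the hardware sees the pages as one continuous ring.
 */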
3414 if (i == (bp->rx_max_ring - 1))
3418 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3419 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3423 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3424 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3426 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3428 val = (u64) bp->rx_desc_mapping[0] >> 32;
3429 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3431 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3432 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3434 for (i = 0; i < bp->rx_ring_size; i++) {
3435 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3438 prod = NEXT_RX_BD(prod);
3439 ring_prod = RX_RING_IDX(prod);
3443 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3445 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
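/* Size the RX ring: the requested number of buffers is converted into
 * whole BD pages, and the page count is rounded up to the next power
 * of two so that the ring index macros can wrap with a simple mask.
 */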
3449 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3453 bp->rx_ring_size = size;
3455 while (size > MAX_RX_DESC_CNT) {
3456 size -= MAX_RX_DESC_CNT;
3459 /* round to next power of 2 */
3461 while ((max & num_rings) == 0)
3464 if (num_rings != max)
3467 bp->rx_max_ring = max;
3468 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3472 bnx2_free_tx_skbs(struct bnx2 *bp)
3476 if (bp->tx_buf_ring == NULL)
3479 for (i = 0; i < TX_DESC_CNT; ) {
3480 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3481 struct sk_buff *skb = tx_buf->skb;
3489 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3490 skb_headlen(skb), PCI_DMA_TODEVICE);
3494 last = skb_shinfo(skb)->nr_frags;
3495 for (j = 0; j < last; j++) {
3496 tx_buf = &bp->tx_buf_ring[i + j + 1];
3497 pci_unmap_page(bp->pdev,
3498 pci_unmap_addr(tx_buf, mapping),
3499 skb_shinfo(skb)->frags[j].size,
3502 dev_kfree_skb_any(skb);
3509 bnx2_free_rx_skbs(struct bnx2 *bp)
3513 if (bp->rx_buf_ring == NULL)
3516 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3517 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3518 struct sk_buff *skb = rx_buf->skb;
3523 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3524 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3528 dev_kfree_skb_any(skb);
3533 bnx2_free_skbs(struct bnx2 *bp)
3535 bnx2_free_tx_skbs(bp);
3536 bnx2_free_rx_skbs(bp);
3540 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3544 rc = bnx2_reset_chip(bp, reset_code);
3550 bnx2_init_tx_ring(bp);
3551 bnx2_init_rx_ring(bp);
3556 bnx2_init_nic(struct bnx2 *bp)
3560 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3569 bnx2_test_registers(struct bnx2 *bp)
3573 static const struct {
3579 { 0x006c, 0, 0x00000000, 0x0000003f },
3580 { 0x0090, 0, 0xffffffff, 0x00000000 },
3581 { 0x0094, 0, 0x00000000, 0x00000000 },
3583 { 0x0404, 0, 0x00003f00, 0x00000000 },
3584 { 0x0418, 0, 0x00000000, 0xffffffff },
3585 { 0x041c, 0, 0x00000000, 0xffffffff },
3586 { 0x0420, 0, 0x00000000, 0x80ffffff },
3587 { 0x0424, 0, 0x00000000, 0x00000000 },
3588 { 0x0428, 0, 0x00000000, 0x00000001 },
3589 { 0x0450, 0, 0x00000000, 0x0000ffff },
3590 { 0x0454, 0, 0x00000000, 0xffffffff },
3591 { 0x0458, 0, 0x00000000, 0xffffffff },
3593 { 0x0808, 0, 0x00000000, 0xffffffff },
3594 { 0x0854, 0, 0x00000000, 0xffffffff },
3595 { 0x0868, 0, 0x00000000, 0x77777777 },
3596 { 0x086c, 0, 0x00000000, 0x77777777 },
3597 { 0x0870, 0, 0x00000000, 0x77777777 },
3598 { 0x0874, 0, 0x00000000, 0x77777777 },
3600 { 0x0c00, 0, 0x00000000, 0x00000001 },
3601 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3602 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3604 { 0x1000, 0, 0x00000000, 0x00000001 },
3605 { 0x1004, 0, 0x00000000, 0x000f0001 },
3607 { 0x1408, 0, 0x01c00800, 0x00000000 },
3608 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3609 { 0x14a8, 0, 0x00000000, 0x000001ff },
3610 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3611 { 0x14b0, 0, 0x00000002, 0x00000001 },
3612 { 0x14b8, 0, 0x00000000, 0x00000000 },
3613 { 0x14c0, 0, 0x00000000, 0x00000009 },
3614 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3615 { 0x14cc, 0, 0x00000000, 0x00000001 },
3616 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3618 { 0x1800, 0, 0x00000000, 0x00000001 },
3619 { 0x1804, 0, 0x00000000, 0x00000003 },
3621 { 0x2800, 0, 0x00000000, 0x00000001 },
3622 { 0x2804, 0, 0x00000000, 0x00003f01 },
3623 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3624 { 0x2810, 0, 0xffff0000, 0x00000000 },
3625 { 0x2814, 0, 0xffff0000, 0x00000000 },
3626 { 0x2818, 0, 0xffff0000, 0x00000000 },
3627 { 0x281c, 0, 0xffff0000, 0x00000000 },
3628 { 0x2834, 0, 0xffffffff, 0x00000000 },
3629 { 0x2840, 0, 0x00000000, 0xffffffff },
3630 { 0x2844, 0, 0x00000000, 0xffffffff },
3631 { 0x2848, 0, 0xffffffff, 0x00000000 },
3632 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3634 { 0x2c00, 0, 0x00000000, 0x00000011 },
3635 { 0x2c04, 0, 0x00000000, 0x00030007 },
3637 { 0x3c00, 0, 0x00000000, 0x00000001 },
3638 { 0x3c04, 0, 0x00000000, 0x00070000 },
3639 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3640 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3641 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3642 { 0x3c14, 0, 0x00000000, 0xffffffff },
3643 { 0x3c18, 0, 0x00000000, 0xffffffff },
3644 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3645 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3647 { 0x5004, 0, 0x00000000, 0x0000007f },
3648 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3649 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3651 { 0x5c00, 0, 0x00000000, 0x00000001 },
3652 { 0x5c04, 0, 0x00000000, 0x0003000f },
3653 { 0x5c08, 0, 0x00000003, 0x00000000 },
3654 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3655 { 0x5c10, 0, 0x00000000, 0xffffffff },
3656 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3657 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3658 { 0x5c88, 0, 0x00000000, 0x00077373 },
3659 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3661 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3662 { 0x680c, 0, 0xffffffff, 0x00000000 },
3663 { 0x6810, 0, 0xffffffff, 0x00000000 },
3664 { 0x6814, 0, 0xffffffff, 0x00000000 },
3665 { 0x6818, 0, 0xffffffff, 0x00000000 },
3666 { 0x681c, 0, 0xffffffff, 0x00000000 },
3667 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3668 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3669 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3670 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3671 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3672 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3673 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3674 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3675 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3676 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3677 { 0x684c, 0, 0xffffffff, 0x00000000 },
3678 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3679 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3680 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3681 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3682 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3683 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3685 { 0xffff, 0, 0x00000000, 0x00000000 },
3689 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3690 u32 offset, rw_mask, ro_mask, save_val, val;
3692 offset = (u32) reg_tbl[i].offset;
3693 rw_mask = reg_tbl[i].rw_mask;
3694 ro_mask = reg_tbl[i].ro_mask;
3696 save_val = readl(bp->regview + offset);
3698 writel(0, bp->regview + offset);
3700 val = readl(bp->regview + offset);
3701 if ((val & rw_mask) != 0) {
3705 if ((val & ro_mask) != (save_val & ro_mask)) {
3709 writel(0xffffffff, bp->regview + offset);
3711 val = readl(bp->regview + offset);
3712 if ((val & rw_mask) != rw_mask) {
3716 if ((val & ro_mask) != (save_val & ro_mask)) {
3720 writel(save_val, bp->regview + offset);
3724 writel(save_val, bp->regview + offset);
3732 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3734 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3735 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3738 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3741 for (offset = 0; offset < size; offset += 4) {
3743 REG_WR_IND(bp, start + offset, test_pattern[i]);
3745 if (REG_RD_IND(bp, start + offset) !=
3755 bnx2_test_memory(struct bnx2 *bp)
3759 static const struct {
3763 { 0x60000, 0x4000 },
3764 { 0xa0000, 0x3000 },
3765 { 0xe0000, 0x4000 },
3766 { 0x120000, 0x4000 },
3767 { 0x1a0000, 0x4000 },
3768 { 0x160000, 0x4000 },
3772 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3773 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3774 mem_tbl[i].len)) != 0) {
3782 #define BNX2_MAC_LOOPBACK 0
3783 #define BNX2_PHY_LOOPBACK 1
3786 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3788 unsigned int pkt_size, num_pkts, i;
3789 struct sk_buff *skb, *rx_skb;
3790 unsigned char *packet;
3791 u16 rx_start_idx, rx_idx;
3794 struct sw_bd *rx_buf;
3795 struct l2_fhdr *rx_hdr;
3798 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3799 bp->loopback = MAC_LOOPBACK;
3800 bnx2_set_mac_loopback(bp);
3802 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3804 bnx2_set_phy_loopback(bp);
3810 skb = dev_alloc_skb(pkt_size);
3813 packet = skb_put(skb, pkt_size);
3814 memcpy(packet, bp->mac_addr, 6);
3815 memset(packet + 6, 0x0, 8);
3816 for (i = 14; i < pkt_size; i++)
3817 packet[i] = (unsigned char) (i & 0xff);
3819 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3822 REG_WR(bp, BNX2_HC_COMMAND,
3823 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3825 REG_RD(bp, BNX2_HC_COMMAND);
3828 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3832 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3834 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3835 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3836 txbd->tx_bd_mss_nbytes = pkt_size;
3837 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3840 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3841 bp->tx_prod_bseq += pkt_size;
3843 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3844 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
3848 REG_WR(bp, BNX2_HC_COMMAND,
3849 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3851 REG_RD(bp, BNX2_HC_COMMAND);
3855 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3856 dev_kfree_skb_irq(skb);
3858 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
3859 goto loopback_test_done;
3862 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3863 if (rx_idx != rx_start_idx + num_pkts) {
3864 goto loopback_test_done;
3867 rx_buf = &bp->rx_buf_ring[rx_start_idx];
3868 rx_skb = rx_buf->skb;
3870 rx_hdr = (struct l2_fhdr *) rx_skb->data;
3871 skb_reserve(rx_skb, bp->rx_offset);
3873 pci_dma_sync_single_for_cpu(bp->pdev,
3874 pci_unmap_addr(rx_buf, mapping),
3875 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3877 if (rx_hdr->l2_fhdr_status &
3878 (L2_FHDR_ERRORS_BAD_CRC |
3879 L2_FHDR_ERRORS_PHY_DECODE |
3880 L2_FHDR_ERRORS_ALIGNMENT |
3881 L2_FHDR_ERRORS_TOO_SHORT |
3882 L2_FHDR_ERRORS_GIANT_FRAME)) {
3884 goto loopback_test_done;
3887 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3888 goto loopback_test_done;
3891 for (i = 14; i < pkt_size; i++) {
3892 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3893 goto loopback_test_done;
3904 #define BNX2_MAC_LOOPBACK_FAILED 1
3905 #define BNX2_PHY_LOOPBACK_FAILED 2
3906 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3907 BNX2_PHY_LOOPBACK_FAILED)
3910 bnx2_test_loopback(struct bnx2 *bp)
3914 if (!netif_running(bp->dev))
3915 return BNX2_LOOPBACK_FAILED;
3917 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3918 spin_lock_bh(&bp->phy_lock);
3920 spin_unlock_bh(&bp->phy_lock);
3921 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3922 rc |= BNX2_MAC_LOOPBACK_FAILED;
3923 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3924 rc |= BNX2_PHY_LOOPBACK_FAILED;
3928 #define NVRAM_SIZE 0x200
3929 #define CRC32_RESIDUAL 0xdebb20e3
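/* Standard CRC32 residue: running ether_crc_le() over a block that
 * already has its little-endian CRC32 appended yields this constant,
 * so the checks below compare against the residue instead of locating
 * and extracting the stored CRC.
 */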
3932 bnx2_test_nvram(struct bnx2 *bp)
3934 u32 buf[NVRAM_SIZE / 4];
3935 u8 *data = (u8 *) buf;
3939 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3940 goto test_nvram_done;
3942 magic = be32_to_cpu(buf[0]);
3943 if (magic != 0x669955aa) {
3945 goto test_nvram_done;
3948 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3949 goto test_nvram_done;
3951 csum = ether_crc_le(0x100, data);
3952 if (csum != CRC32_RESIDUAL) {
3954 goto test_nvram_done;
3957 csum = ether_crc_le(0x100, data + 0x100);
3958 if (csum != CRC32_RESIDUAL) {
3967 bnx2_test_link(struct bnx2 *bp)
3971 spin_lock_bh(&bp->phy_lock);
3972 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3973 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3974 spin_unlock_bh(&bp->phy_lock);
3976 if (bmsr & BMSR_LSTATUS) {
3983 bnx2_test_intr(struct bnx2 *bp)
3988 if (!netif_running(bp->dev))
3991 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3993 /* This register is not touched during run-time. */
3994 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
3995 REG_RD(bp, BNX2_HC_COMMAND);
3997 for (i = 0; i < 10; i++) {
3998 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4004 msleep_interruptible(10);
4013 bnx2_timer(unsigned long data)
4015 struct bnx2 *bp = (struct bnx2 *) data;
4018 if (!netif_running(bp->dev))
4021 if (atomic_read(&bp->intr_sem) != 0)
4022 goto bnx2_restart_timer;
4024 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4025 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4027 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
4028 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
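/* 5706 SerDes parallel-detect workaround: while autonegotiating with
 * no link, watch for a signal with no incoming config words and fall
 * back to forced 1000 Mbps full duplex; once linked that way, keep
 * polling and re-enable autoneg if the partner starts sending config
 * words again.
 */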
4030 spin_lock(&bp->phy_lock);
4031 if (bp->serdes_an_pending) {
4032 bp->serdes_an_pending--;
4034 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4037 bp->current_interval = bp->timer_interval;
4039 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4041 if (bmcr & BMCR_ANENABLE) {
4044 bnx2_write_phy(bp, 0x1c, 0x7c00);
4045 bnx2_read_phy(bp, 0x1c, &phy1);
4047 bnx2_write_phy(bp, 0x17, 0x0f01);
4048 bnx2_read_phy(bp, 0x15, &phy2);
4049 bnx2_write_phy(bp, 0x17, 0x0f01);
4050 bnx2_read_phy(bp, 0x15, &phy2);
4052 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4053 !(phy2 & 0x20)) { /* no CONFIG */
4055 bmcr &= ~BMCR_ANENABLE;
4056 bmcr |= BMCR_SPEED1000 |
4058 bnx2_write_phy(bp, MII_BMCR, bmcr);
4060 PHY_PARALLEL_DETECT_FLAG;
4064 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4065 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4068 bnx2_write_phy(bp, 0x17, 0x0f01);
4069 bnx2_read_phy(bp, 0x15, &phy2);
4073 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4074 bmcr |= BMCR_ANENABLE;
4075 bnx2_write_phy(bp, MII_BMCR, bmcr);
4077 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4082 bp->current_interval = bp->timer_interval;
4084 spin_unlock(&bp->phy_lock);
4088 mod_timer(&bp->timer, jiffies + bp->current_interval);
4091 /* Called with rtnl_lock */
4093 bnx2_open(struct net_device *dev)
4095 struct bnx2 *bp = netdev_priv(dev);
4098 bnx2_set_power_state(bp, PCI_D0);
4099 bnx2_disable_int(bp);
4101 rc = bnx2_alloc_mem(bp);
4105 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4106 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4109 if (pci_enable_msi(bp->pdev) == 0) {
4110 bp->flags |= USING_MSI_FLAG;
4111 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4115 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4116 SA_SHIRQ, dev->name, dev);
4120 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
4128 rc = bnx2_init_nic(bp);
4131 free_irq(bp->pdev->irq, dev);
4132 if (bp->flags & USING_MSI_FLAG) {
4133 pci_disable_msi(bp->pdev);
4134 bp->flags &= ~USING_MSI_FLAG;
4141 mod_timer(&bp->timer, jiffies + bp->current_interval);
4143 atomic_set(&bp->intr_sem, 0);
4145 bnx2_enable_int(bp);
4147 if (bp->flags & USING_MSI_FLAG) {
4148 /* Test MSI to make sure it is working.
4149 * If the MSI test fails, go back to INTx mode.
4151 if (bnx2_test_intr(bp) != 0) {
4152 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4153 " using MSI, switching to INTx mode. Please"
4154 " report this failure to the PCI maintainer"
4155 " and include system chipset information.\n",
4158 bnx2_disable_int(bp);
4159 free_irq(bp->pdev->irq, dev);
4160 pci_disable_msi(bp->pdev);
4161 bp->flags &= ~USING_MSI_FLAG;
4163 rc = bnx2_init_nic(bp);
4166 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4167 SA_SHIRQ, dev->name, dev);
4172 del_timer_sync(&bp->timer);
4175 bnx2_enable_int(bp);
4178 if (bp->flags & USING_MSI_FLAG) {
4179 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4182 netif_start_queue(dev);
4188 bnx2_reset_task(void *data)
4190 struct bnx2 *bp = data;
4192 if (!netif_running(bp->dev))
4195 bp->in_reset_task = 1;
4196 bnx2_netif_stop(bp);
4200 atomic_set(&bp->intr_sem, 1);
4201 bnx2_netif_start(bp);
4202 bp->in_reset_task = 0;
4206 bnx2_tx_timeout(struct net_device *dev)
4208 struct bnx2 *bp = netdev_priv(dev);
4210 /* This allows the netif to be shut down gracefully before resetting */
4211 schedule_work(&bp->reset_task);
4215 /* Called with rtnl_lock */
4217 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4219 struct bnx2 *bp = netdev_priv(dev);
4221 bnx2_netif_stop(bp);
4224 bnx2_set_rx_mode(dev);
4226 bnx2_netif_start(bp);
4229 /* Called with rtnl_lock */
4231 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4233 struct bnx2 *bp = netdev_priv(dev);
4235 bnx2_netif_stop(bp);
4238 bp->vlgrp->vlan_devices[vid] = NULL;
4239 bnx2_set_rx_mode(dev);
4241 bnx2_netif_start(bp);
4245 /* Called with dev->xmit_lock.
4246 * hard_start_xmit is pseudo-lockless - a lock is only required when
4247 * the tx queue is full. This way, we get the benefit of lockless
4248 * operations most of the time without the complexity of handling
4249 * netif_stop_queue/wake_queue race conditions.
4252 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4254 struct bnx2 *bp = netdev_priv(dev);
4257 struct sw_bd *tx_buf;
4258 u32 len, vlan_tag_flags, last_frag, mss;
4259 u16 prod, ring_prod;
4262 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4263 netif_stop_queue(dev);
4264 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4267 return NETDEV_TX_BUSY;
4269 len = skb_headlen(skb);
4271 ring_prod = TX_RING_IDX(prod);
4274 if (skb->ip_summed == CHECKSUM_HW) {
4275 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4278 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4280 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4283 if ((mss = skb_shinfo(skb)->tso_size) &&
4284 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4285 u32 tcp_opt_len, ip_tcp_len;
4287 if (skb_header_cloned(skb) &&
4288 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4290 return NETDEV_TX_OK;
4293 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4294 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4297 if (skb->h.th->doff > 5) {
4298 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4300 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
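/* LSO setup: the IP total length is rewritten below to cover one MSS
 * of payload plus headers, the IP checksum is cleared, and the TCP
 * checksum is seeded with just the pseudo-header so the chip can
 * finish both for every segment.  The IP and TCP option lengths (in
 * dwords) are packed into vlan_tag_flags so the hardware knows how
 * much header to replicate.
 */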
4302 skb->nh.iph->check = 0;
4303 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4305 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4309 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4310 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4311 (tcp_opt_len >> 2)) << 8;
4320 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4322 tx_buf = &bp->tx_buf_ring[ring_prod];
4324 pci_unmap_addr_set(tx_buf, mapping, mapping);
4326 txbd = &bp->tx_desc_ring[ring_prod];
4328 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4329 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4330 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4331 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4333 last_frag = skb_shinfo(skb)->nr_frags;
4335 for (i = 0; i < last_frag; i++) {
4336 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4338 prod = NEXT_TX_BD(prod);
4339 ring_prod = TX_RING_IDX(prod);
4340 txbd = &bp->tx_desc_ring[ring_prod];
4343 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4344 len, PCI_DMA_TODEVICE);
4345 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4348 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4349 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4350 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4351 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4354 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4356 prod = NEXT_TX_BD(prod);
4357 bp->tx_prod_bseq += skb->len;
4359 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4360 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
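/* Ring the doorbell: writing the new producer index and the running
 * byte count to the TX context mailbox tells the chip that the BDs
 * just built are ready.
 */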
4365 dev->trans_start = jiffies;
4367 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4368 spin_lock(&bp->tx_lock);
4369 netif_stop_queue(dev);
4371 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4372 netif_wake_queue(dev);
4373 spin_unlock(&bp->tx_lock);
4376 return NETDEV_TX_OK;
4379 /* Called with rtnl_lock */
4381 bnx2_close(struct net_device *dev)
4383 struct bnx2 *bp = netdev_priv(dev);
4386 /* Calling flush_scheduled_work() may deadlock because
4387 * linkwatch_event() may be on the workqueue and it will try to get
4388 * the rtnl_lock which we are holding.
4390 while (bp->in_reset_task)
4393 bnx2_netif_stop(bp);
4394 del_timer_sync(&bp->timer);
4395 if (bp->flags & NO_WOL_FLAG)
4396 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
4398 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4400 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4401 bnx2_reset_chip(bp, reset_code);
4402 free_irq(bp->pdev->irq, dev);
4403 if (bp->flags & USING_MSI_FLAG) {
4404 pci_disable_msi(bp->pdev);
4405 bp->flags &= ~USING_MSI_FLAG;
4410 netif_carrier_off(bp->dev);
4411 bnx2_set_power_state(bp, PCI_D3hot);
4415 #define GET_NET_STATS64(ctr) \
4416 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4417 (unsigned long) (ctr##_lo)
4419 #define GET_NET_STATS32(ctr) \
4422 #if (BITS_PER_LONG == 64)
4423 #define GET_NET_STATS GET_NET_STATS64
4425 #define GET_NET_STATS GET_NET_STATS32
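/* Hardware counters live in the statistics block as hi/lo 32-bit
 * pairs.  64-bit hosts combine both halves; 32-bit hosts cannot hold
 * the full value in an unsigned long and report only the low word.
 */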
4428 static struct net_device_stats *
4429 bnx2_get_stats(struct net_device *dev)
4431 struct bnx2 *bp = netdev_priv(dev);
4432 struct statistics_block *stats_blk = bp->stats_blk;
4433 struct net_device_stats *net_stats = &bp->net_stats;
4435 if (bp->stats_blk == NULL) {
4438 net_stats->rx_packets =
4439 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4440 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4441 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4443 net_stats->tx_packets =
4444 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4445 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4446 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4448 net_stats->rx_bytes =
4449 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4451 net_stats->tx_bytes =
4452 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4454 net_stats->multicast =
4455 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4457 net_stats->collisions =
4458 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4460 net_stats->rx_length_errors =
4461 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4462 stats_blk->stat_EtherStatsOverrsizePkts);
4464 net_stats->rx_over_errors =
4465 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4467 net_stats->rx_frame_errors =
4468 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4470 net_stats->rx_crc_errors =
4471 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4473 net_stats->rx_errors = net_stats->rx_length_errors +
4474 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4475 net_stats->rx_crc_errors;
4477 net_stats->tx_aborted_errors =
4478 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4479 stats_blk->stat_Dot3StatsLateCollisions);
4481 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4482 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4483 net_stats->tx_carrier_errors = 0;
4485 net_stats->tx_carrier_errors =
4487 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4490 net_stats->tx_errors =
4492 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4494 net_stats->tx_aborted_errors +
4495 net_stats->tx_carrier_errors;
4500 /* All ethtool functions called with rtnl_lock */
4503 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4505 struct bnx2 *bp = netdev_priv(dev);
4507 cmd->supported = SUPPORTED_Autoneg;
4508 if (bp->phy_flags & PHY_SERDES_FLAG) {
4509 cmd->supported |= SUPPORTED_1000baseT_Full |
4512 cmd->port = PORT_FIBRE;
4515 cmd->supported |= SUPPORTED_10baseT_Half |
4516 SUPPORTED_10baseT_Full |
4517 SUPPORTED_100baseT_Half |
4518 SUPPORTED_100baseT_Full |
4519 SUPPORTED_1000baseT_Full |
4522 cmd->port = PORT_TP;
4525 cmd->advertising = bp->advertising;
4527 if (bp->autoneg & AUTONEG_SPEED) {
4528 cmd->autoneg = AUTONEG_ENABLE;
4531 cmd->autoneg = AUTONEG_DISABLE;
4534 if (netif_carrier_ok(dev)) {
4535 cmd->speed = bp->line_speed;
4536 cmd->duplex = bp->duplex;
4543 cmd->transceiver = XCVR_INTERNAL;
4544 cmd->phy_address = bp->phy_addr;
4550 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4552 struct bnx2 *bp = netdev_priv(dev);
4553 u8 autoneg = bp->autoneg;
4554 u8 req_duplex = bp->req_duplex;
4555 u16 req_line_speed = bp->req_line_speed;
4556 u32 advertising = bp->advertising;
4558 if (cmd->autoneg == AUTONEG_ENABLE) {
4559 autoneg |= AUTONEG_SPEED;
4561 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4563 /* allow advertising only one speed */
4564 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4565 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4566 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4567 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4569 if (bp->phy_flags & PHY_SERDES_FLAG)
4572 advertising = cmd->advertising;
4575 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4576 advertising = cmd->advertising;
4578 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4582 if (bp->phy_flags & PHY_SERDES_FLAG) {
4583 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4586 advertising = ETHTOOL_ALL_COPPER_SPEED;
4589 advertising |= ADVERTISED_Autoneg;
4592 if (bp->phy_flags & PHY_SERDES_FLAG) {
4593 if ((cmd->speed != SPEED_1000) ||
4594 (cmd->duplex != DUPLEX_FULL)) {
4598 else if (cmd->speed == SPEED_1000) {
4601 autoneg &= ~AUTONEG_SPEED;
4602 req_line_speed = cmd->speed;
4603 req_duplex = cmd->duplex;
4607 bp->autoneg = autoneg;
4608 bp->advertising = advertising;
4609 bp->req_line_speed = req_line_speed;
4610 bp->req_duplex = req_duplex;
4612 spin_lock_bh(&bp->phy_lock);
4616 spin_unlock_bh(&bp->phy_lock);
4622 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4624 struct bnx2 *bp = netdev_priv(dev);
4626 strcpy(info->driver, DRV_MODULE_NAME);
4627 strcpy(info->version, DRV_MODULE_VERSION);
4628 strcpy(info->bus_info, pci_name(bp->pdev));
4629 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4630 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4631 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4632 info->fw_version[1] = info->fw_version[3] = '.';
4633 info->fw_version[5] = 0;
4636 #define BNX2_REGDUMP_LEN (32 * 1024)
4639 bnx2_get_regs_len(struct net_device *dev)
4641 return BNX2_REGDUMP_LEN;
4645 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4647 u32 *p = _p, i, offset;
4649 struct bnx2 *bp = netdev_priv(dev);
4650 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4651 0x0800, 0x0880, 0x0c00, 0x0c10,
4652 0x0c30, 0x0d08, 0x1000, 0x101c,
4653 0x1040, 0x1048, 0x1080, 0x10a4,
4654 0x1400, 0x1490, 0x1498, 0x14f0,
4655 0x1500, 0x155c, 0x1580, 0x15dc,
4656 0x1600, 0x1658, 0x1680, 0x16d8,
4657 0x1800, 0x1820, 0x1840, 0x1854,
4658 0x1880, 0x1894, 0x1900, 0x1984,
4659 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4660 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4661 0x2000, 0x2030, 0x23c0, 0x2400,
4662 0x2800, 0x2820, 0x2830, 0x2850,
4663 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4664 0x3c00, 0x3c94, 0x4000, 0x4010,
4665 0x4080, 0x4090, 0x43c0, 0x4458,
4666 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4667 0x4fc0, 0x5010, 0x53c0, 0x5444,
4668 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4669 0x5fc0, 0x6000, 0x6400, 0x6428,
4670 0x6800, 0x6848, 0x684c, 0x6860,
4671 0x6888, 0x6910, 0x8000 };
4675 memset(p, 0, BNX2_REGDUMP_LEN);
4677 if (!netif_running(bp->dev))
4681 offset = reg_boundaries[0];
4683 while (offset < BNX2_REGDUMP_LEN) {
4684 *p++ = REG_RD(bp, offset);
4686 if (offset == reg_boundaries[i + 1]) {
4687 offset = reg_boundaries[i + 2];
4688 p = (u32 *) (orig_p + offset);
4695 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4697 struct bnx2 *bp = netdev_priv(dev);
4699 if (bp->flags & NO_WOL_FLAG) {
4704 wol->supported = WAKE_MAGIC;
4706 wol->wolopts = WAKE_MAGIC;
4710 memset(&wol->sopass, 0, sizeof(wol->sopass));
4714 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4716 struct bnx2 *bp = netdev_priv(dev);
4718 if (wol->wolopts & ~WAKE_MAGIC)
4721 if (wol->wolopts & WAKE_MAGIC) {
4722 if (bp->flags & NO_WOL_FLAG)
4734 bnx2_nway_reset(struct net_device *dev)
4736 struct bnx2 *bp = netdev_priv(dev);
4739 if (!(bp->autoneg & AUTONEG_SPEED)) {
4743 spin_lock_bh(&bp->phy_lock);
4745 /* Force a link down that is visible to the link partner */
4746 if (bp->phy_flags & PHY_SERDES_FLAG) {
4747 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4748 spin_unlock_bh(&bp->phy_lock);
4752 spin_lock_bh(&bp->phy_lock);
4753 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4754 bp->current_interval = SERDES_AN_TIMEOUT;
4755 bp->serdes_an_pending = 1;
4756 mod_timer(&bp->timer, jiffies + bp->current_interval);
4760 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4761 bmcr &= ~BMCR_LOOPBACK;
4762 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4764 spin_unlock_bh(&bp->phy_lock);
4770 bnx2_get_eeprom_len(struct net_device *dev)
4772 struct bnx2 *bp = netdev_priv(dev);
4774 if (bp->flash_info == NULL)
4777 return (int) bp->flash_size;
4781 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4784 struct bnx2 *bp = netdev_priv(dev);
4787 /* parameters already validated in ethtool_get_eeprom */
4789 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4795 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4798 struct bnx2 *bp = netdev_priv(dev);
4801 /* parameters already validated in ethtool_set_eeprom */
4803 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4809 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4811 struct bnx2 *bp = netdev_priv(dev);
4813 memset(coal, 0, sizeof(struct ethtool_coalesce));
4815 coal->rx_coalesce_usecs = bp->rx_ticks;
4816 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4817 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4818 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4820 coal->tx_coalesce_usecs = bp->tx_ticks;
4821 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4822 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4823 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4825 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4831 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4833 struct bnx2 *bp = netdev_priv(dev);
4835 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4836 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4838 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4839 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4841 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4842 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4844 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4845 if (bp->rx_quick_cons_trip_int > 0xff)
4846 bp->rx_quick_cons_trip_int = 0xff;
4848 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4849 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4851 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4852 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4854 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4855 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4857 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4858 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4861 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4862 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4863 bp->stats_ticks &= 0xffff00;
4865 if (netif_running(bp->dev)) {
4866 bnx2_netif_stop(bp);
4868 bnx2_netif_start(bp);
4875 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4877 struct bnx2 *bp = netdev_priv(dev);
4879 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4880 ering->rx_mini_max_pending = 0;
4881 ering->rx_jumbo_max_pending = 0;
4883 ering->rx_pending = bp->rx_ring_size;
4884 ering->rx_mini_pending = 0;
4885 ering->rx_jumbo_pending = 0;
4887 ering->tx_max_pending = MAX_TX_DESC_CNT;
4888 ering->tx_pending = bp->tx_ring_size;
4892 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4894 struct bnx2 *bp = netdev_priv(dev);
4896 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
4897 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4898 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4902 if (netif_running(bp->dev)) {
4903 bnx2_netif_stop(bp);
4904 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
4909 bnx2_set_rx_ring_size(bp, ering->rx_pending);
4910 bp->tx_ring_size = ering->tx_pending;
4912 if (netif_running(bp->dev)) {
4915 rc = bnx2_alloc_mem(bp);
4919 bnx2_netif_start(bp);
4926 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4928 struct bnx2 *bp = netdev_priv(dev);
4930 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4931 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4932 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4936 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4938 struct bnx2 *bp = netdev_priv(dev);
4940 bp->req_flow_ctrl = 0;
4941 if (epause->rx_pause)
4942 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4943 if (epause->tx_pause)
4944 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4946 if (epause->autoneg) {
4947 bp->autoneg |= AUTONEG_FLOW_CTRL;
4950 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4953 spin_lock_bh(&bp->phy_lock);
4957 spin_unlock_bh(&bp->phy_lock);
4963 bnx2_get_rx_csum(struct net_device *dev)
4965 struct bnx2 *bp = netdev_priv(dev);
4971 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4973 struct bnx2 *bp = netdev_priv(dev);
4979 #define BNX2_NUM_STATS 45
4982 char string[ETH_GSTRING_LEN];
4983 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4985 { "rx_error_bytes" },
4987 { "tx_error_bytes" },
4988 { "rx_ucast_packets" },
4989 { "rx_mcast_packets" },
4990 { "rx_bcast_packets" },
4991 { "tx_ucast_packets" },
4992 { "tx_mcast_packets" },
4993 { "tx_bcast_packets" },
4994 { "tx_mac_errors" },
4995 { "tx_carrier_errors" },
4996 { "rx_crc_errors" },
4997 { "rx_align_errors" },
4998 { "tx_single_collisions" },
4999 { "tx_multi_collisions" },
5001 { "tx_excess_collisions" },
5002 { "tx_late_collisions" },
5003 { "tx_total_collisions" },
5006 { "rx_undersize_packets" },
5007 { "rx_oversize_packets" },
5008 { "rx_64_byte_packets" },
5009 { "rx_65_to_127_byte_packets" },
5010 { "rx_128_to_255_byte_packets" },
5011 { "rx_256_to_511_byte_packets" },
5012 { "rx_512_to_1023_byte_packets" },
5013 { "rx_1024_to_1522_byte_packets" },
5014 { "rx_1523_to_9022_byte_packets" },
5015 { "tx_64_byte_packets" },
5016 { "tx_65_to_127_byte_packets" },
5017 { "tx_128_to_255_byte_packets" },
5018 { "tx_256_to_511_byte_packets" },
5019 { "tx_512_to_1023_byte_packets" },
5020 { "tx_1024_to_1522_byte_packets" },
5021 { "tx_1523_to_9022_byte_packets" },
5022 { "rx_xon_frames" },
5023 { "rx_xoff_frames" },
5024 { "tx_xon_frames" },
5025 { "tx_xoff_frames" },
5026 { "rx_mac_ctrl_frames" },
5027 { "rx_filtered_packets" },
5031 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5033 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5034 STATS_OFFSET32(stat_IfHCInOctets_hi),
5035 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5036 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5037 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5038 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5039 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5040 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5041 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5042 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5043 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5044 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5045 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5046 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5047 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5048 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5049 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5050 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5051 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5052 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5053 STATS_OFFSET32(stat_EtherStatsCollisions),
5054 STATS_OFFSET32(stat_EtherStatsFragments),
5055 STATS_OFFSET32(stat_EtherStatsJabbers),
5056 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5057 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5058 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5059 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5060 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5061 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5062 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5063 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5064 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5065 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5066 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5067 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5068 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5069 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5070 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5071 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5072 STATS_OFFSET32(stat_XonPauseFramesReceived),
5073 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5074 STATS_OFFSET32(stat_OutXonSent),
5075 STATS_OFFSET32(stat_OutXoffSent),
5076 STATS_OFFSET32(stat_MacControlFramesReceived),
5077 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5078 STATS_OFFSET32(stat_IfInMBUFDiscards),
5081 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5082  * skipped because of errata. */
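/* Editor's note: per-counter width tables, aligned with the arrays above.
 * 8 means the counter is a 64-bit value stored as a hi/lo pair of u32
 * words, 4 means a plain 32-bit counter, and 0 means the counter is
 * skipped for that chip (see the errata note above).
 */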
5084 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5085 8,0,8,8,8,8,8,8,8,8,
5086 4,0,4,4,4,4,4,4,4,4,
5087 4,4,4,4,4,4,4,4,4,4,
5088 4,4,4,4,4,4,4,4,4,4,
5092 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5093 8,0,8,8,8,8,8,8,8,8,
5094 4,4,4,4,4,4,4,4,4,4,
5095 4,4,4,4,4,4,4,4,4,4,
5096 4,4,4,4,4,4,4,4,4,4,
5100 #define BNX2_NUM_TESTS 6
5103 char string[ETH_GSTRING_LEN];
5104 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5105 { "register_test (offline)" },
5106 { "memory_test (offline)" },
5107 { "loopback_test (offline)" },
5108 { "nvram_test (online)" },
5109 { "interrupt_test (online)" },
5110 { "link_test (online)" },
5114 bnx2_self_test_count(struct net_device *dev)
5116 return BNX2_NUM_TESTS;
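/* Editor's note: bnx2_self_test() implements the ethtool self-test entry
 * point.  When ETH_TEST_FL_OFFLINE is requested the NIC is stopped and
 * the chip is reset into diagnostic mode so the register, memory and
 * loopback tests can run; the chip is then reset again and traffic is
 * restarted before the online NVRAM, interrupt and link tests.  Each
 * test writes its result into buf[] and any failure also sets
 * ETH_TEST_FL_FAILED.
 */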
5120 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5122 struct bnx2 *bp = netdev_priv(dev);
5124 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5125 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5126 bnx2_netif_stop(bp);
5127 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5130 if (bnx2_test_registers(bp) != 0) {
5132 etest->flags |= ETH_TEST_FL_FAILED;
5134 if (bnx2_test_memory(bp) != 0) {
5136 etest->flags |= ETH_TEST_FL_FAILED;
5138 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5139 etest->flags |= ETH_TEST_FL_FAILED;
5141 if (!netif_running(bp->dev)) {
5142 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5146 bnx2_netif_start(bp);
5149 /* wait for link up */
5150 msleep_interruptible(3000);
5151 if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
5152 msleep_interruptible(4000);
5155 if (bnx2_test_nvram(bp) != 0) {
5157 etest->flags |= ETH_TEST_FL_FAILED;
5159 if (bnx2_test_intr(bp) != 0) {
5161 etest->flags |= ETH_TEST_FL_FAILED;
5164 if (bnx2_test_link(bp) != 0) {
5166 etest->flags |= ETH_TEST_FL_FAILED;
5172 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5174 switch (stringset) {
5176 memcpy(buf, bnx2_stats_str_arr,
5177 sizeof(bnx2_stats_str_arr));
5180 memcpy(buf, bnx2_tests_str_arr,
5181 sizeof(bnx2_tests_str_arr));
5187 bnx2_get_stats_count(struct net_device *dev)
5189 return BNX2_NUM_STATS;
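/* Editor's note: bnx2_get_ethtool_stats() copies counters out of the
 * statistics block that the chip DMAs to host memory.  A per-chip width
 * table selects how each entry is read: zero-width entries are skipped,
 * 32-bit counters are widened to u64, and 64-bit counters are assembled
 * from their hi/lo word pair.
 */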
5193 bnx2_get_ethtool_stats(struct net_device *dev,
5194 struct ethtool_stats *stats, u64 *buf)
5196 struct bnx2 *bp = netdev_priv(dev);
5198 u32 *hw_stats = (u32 *) bp->stats_blk;
5199 u8 *stats_len_arr = NULL;
5201 if (hw_stats == NULL) {
5202 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5206 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5207 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5208 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5209 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5210 stats_len_arr = bnx2_5706_stats_len_arr;
5212 stats_len_arr = bnx2_5708_stats_len_arr;
5214 for (i = 0; i < BNX2_NUM_STATS; i++) {
5215 if (stats_len_arr[i] == 0) {
5216 /* skip this counter */
5220 if (stats_len_arr[i] == 4) {
5221 /* 4-byte counter */
5223 *(hw_stats + bnx2_stats_offset_arr[i]);
5226 /* 8-byte counter */
5227 buf[i] = (((u64) *(hw_stats +
5228 bnx2_stats_offset_arr[i])) << 32) +
5229 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5234 bnx2_phys_id(struct net_device *dev, u32 data)
5236 struct bnx2 *bp = netdev_priv(dev);
5243 save = REG_RD(bp, BNX2_MISC_CFG);
5244 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5246 for (i = 0; i < (data * 2); i++) {
5248 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5251 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5252 BNX2_EMAC_LED_1000MB_OVERRIDE |
5253 BNX2_EMAC_LED_100MB_OVERRIDE |
5254 BNX2_EMAC_LED_10MB_OVERRIDE |
5255 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5256 BNX2_EMAC_LED_TRAFFIC);
5258 msleep_interruptible(500);
5259 if (signal_pending(current))
5262 REG_WR(bp, BNX2_EMAC_LED, 0);
5263 REG_WR(bp, BNX2_MISC_CFG, save);
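/* Editor's note: the ethtool_ops table below wires these handlers into
 * the standard ethtool interface.  For illustration only (the interface
 * name is just an example):
 *
 *   ethtool -S eth0          -> bnx2_get_ethtool_stats()
 *   ethtool -t eth0 offline  -> bnx2_self_test()
 *   ethtool -p eth0 10       -> bnx2_phys_id(), blinking the LEDs for 10 s
 */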
5267 static struct ethtool_ops bnx2_ethtool_ops = {
5268 .get_settings = bnx2_get_settings,
5269 .set_settings = bnx2_set_settings,
5270 .get_drvinfo = bnx2_get_drvinfo,
5271 .get_regs_len = bnx2_get_regs_len,
5272 .get_regs = bnx2_get_regs,
5273 .get_wol = bnx2_get_wol,
5274 .set_wol = bnx2_set_wol,
5275 .nway_reset = bnx2_nway_reset,
5276 .get_link = ethtool_op_get_link,
5277 .get_eeprom_len = bnx2_get_eeprom_len,
5278 .get_eeprom = bnx2_get_eeprom,
5279 .set_eeprom = bnx2_set_eeprom,
5280 .get_coalesce = bnx2_get_coalesce,
5281 .set_coalesce = bnx2_set_coalesce,
5282 .get_ringparam = bnx2_get_ringparam,
5283 .set_ringparam = bnx2_set_ringparam,
5284 .get_pauseparam = bnx2_get_pauseparam,
5285 .set_pauseparam = bnx2_set_pauseparam,
5286 .get_rx_csum = bnx2_get_rx_csum,
5287 .set_rx_csum = bnx2_set_rx_csum,
5288 .get_tx_csum = ethtool_op_get_tx_csum,
5289 .set_tx_csum = ethtool_op_set_tx_csum,
5290 .get_sg = ethtool_op_get_sg,
5291 .set_sg = ethtool_op_set_sg,
5293 .get_tso = ethtool_op_get_tso,
5294 .set_tso = ethtool_op_set_tso,
5296 .self_test_count = bnx2_self_test_count,
5297 .self_test = bnx2_self_test,
5298 .get_strings = bnx2_get_strings,
5299 .phys_id = bnx2_phys_id,
5300 .get_stats_count = bnx2_get_stats_count,
5301 .get_ethtool_stats = bnx2_get_ethtool_stats,
5302 .get_perm_addr = ethtool_op_get_perm_addr,
5305 /* Called with rtnl_lock */
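/* Editor's note: bnx2_ioctl() services the standard MII ioctls
 * (the SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG family).  PHY register
 * reads and writes are serialized with phy_lock, and writes additionally
 * require CAP_NET_ADMIN.
 */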
5307 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5309 struct mii_ioctl_data *data = if_mii(ifr);
5310 struct bnx2 *bp = netdev_priv(dev);
5315 data->phy_id = bp->phy_addr;
5321 spin_lock_bh(&bp->phy_lock);
5322 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5323 spin_unlock_bh(&bp->phy_lock);
5325 data->val_out = mii_regval;
5331 if (!capable(CAP_NET_ADMIN))
5334 spin_lock_bh(&bp->phy_lock);
5335 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5336 spin_unlock_bh(&bp->phy_lock);
5347 /* Called with rtnl_lock */
5349 bnx2_change_mac_addr(struct net_device *dev, void *p)
5351 struct sockaddr *addr = p;
5352 struct bnx2 *bp = netdev_priv(dev);
5354 if (!is_valid_ether_addr(addr->sa_data))
5357 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5358 if (netif_running(dev))
5359 bnx2_set_mac_addr(bp);
5364 /* Called with rtnl_lock */
5366 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5368 struct bnx2 *bp = netdev_priv(dev);
5370 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5371 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5375 if (netif_running(dev)) {
5376 bnx2_netif_stop(bp);
5380 bnx2_netif_start(bp);
5385 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5387 poll_bnx2(struct net_device *dev)
5389 struct bnx2 *bp = netdev_priv(dev);
5391 disable_irq(bp->pdev->irq);
5392 bnx2_interrupt(bp->pdev->irq, dev, NULL);
5393 enable_irq(bp->pdev->irq);
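/* Editor's note: netpoll hook.  With normal interrupts unavailable
 * (e.g. netconsole), it disables the device's IRQ line, invokes the
 * interrupt handler directly, then re-enables the IRQ.
 */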
5397 static int __devinit
5398 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5401 unsigned long mem_len;
5405 SET_MODULE_OWNER(dev);
5406 SET_NETDEV_DEV(dev, &pdev->dev);
5407 bp = netdev_priv(dev);
5412 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5413 rc = pci_enable_device(pdev);
5415 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
5419 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5420 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5423 goto err_out_disable;
5426 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5428 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5429 goto err_out_disable;
5432 pci_set_master(pdev);
5434 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5435 if (bp->pm_cap == 0) {
5436 printk(KERN_ERR PFX "Cannot find power management capability, "
5439 goto err_out_release;
5442 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5443 if (bp->pcix_cap == 0) {
5444 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5446 goto err_out_release;
5449 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5450 bp->flags |= USING_DAC_FLAG;
5451 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5452 printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5453 "failed, aborting.\n");
5455 goto err_out_release;
5458 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5459 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5461 goto err_out_release;
5467 spin_lock_init(&bp->phy_lock);
5468 spin_lock_init(&bp->tx_lock);
5469 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5471 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5472 mem_len = MB_GET_CID_ADDR(17);
5473 dev->mem_end = dev->mem_start + mem_len;
5474 dev->irq = pdev->irq;
5476 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5479 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5481 goto err_out_release;
5484 /* Configure byte swap and enable writes to the reg_window registers.
5485  * Rely on the CPU to do target byte swapping on big-endian systems;
5486  * the chip's target access swapping will not swap all accesses. */
5488 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5489 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5490 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5492 bnx2_set_power_state(bp, PCI_D0);
5494 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5496 /* Get bus information. */
5497 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5498 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5501 bp->flags |= PCIX_FLAG;
5503 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5505 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5507 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5508 bp->bus_speed_mhz = 133;
5511 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5512 bp->bus_speed_mhz = 100;
5515 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5516 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5517 bp->bus_speed_mhz = 66;
5520 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5521 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5522 bp->bus_speed_mhz = 50;
5525 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5526 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5527 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5528 bp->bus_speed_mhz = 33;
5533 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5534 bp->bus_speed_mhz = 66;
5536 bp->bus_speed_mhz = 33;
5539 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5540 bp->flags |= PCI_32BIT_FLAG;
5542 /* 5706A0 may falsely detect SERR and PERR. */
5543 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5544 reg = REG_RD(bp, PCI_COMMAND);
5545 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5546 REG_WR(bp, PCI_COMMAND, reg);
5548 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5549 !(bp->flags & PCIX_FLAG)) {
5551 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5556 bnx2_init_nvram(bp);
5558 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5560 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5561 BNX2_SHM_HDR_SIGNATURE_SIG)
5562 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5564 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
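/* Editor's note: the shared-memory base is taken from the header word
 * read above when the signature matches (presumably so newer bootcode
 * can relocate the region); otherwise the fixed HOST_VIEW_SHMEM_BASE
 * window is assumed.
 */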
5566 /* Get the permanent MAC address. First we need to make sure the
5567  * firmware is actually running. */
5569 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5571 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5572 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5573 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5578 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5580 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5581 bp->mac_addr[0] = (u8) (reg >> 8);
5582 bp->mac_addr[1] = (u8) reg;
5584 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5585 bp->mac_addr[2] = (u8) (reg >> 24);
5586 bp->mac_addr[3] = (u8) (reg >> 16);
5587 bp->mac_addr[4] = (u8) (reg >> 8);
5588 bp->mac_addr[5] = (u8) reg;
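/* Editor's note: the permanent MAC address is packed into two
 * shared-memory words: MAC_UPPER carries bytes 0-1 and MAC_LOWER bytes
 * 2-5, most significant byte first.  Purely illustrative example:
 * MAC_UPPER = 0x00001018 and MAC_LOWER = 0x8a2b3c4d would unpack to
 * 10:18:8a:2b:3c:4d.
 */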
5590 bp->tx_ring_size = MAX_TX_DESC_CNT;
5591 bnx2_set_rx_ring_size(bp, 100);
5595 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5597 bp->tx_quick_cons_trip_int = 20;
5598 bp->tx_quick_cons_trip = 20;
5599 bp->tx_ticks_int = 80;
5602 bp->rx_quick_cons_trip_int = 6;
5603 bp->rx_quick_cons_trip = 6;
5604 bp->rx_ticks_int = 18;
5607 bp->stats_ticks = 1000000 & 0xffff00;
5609 bp->timer_interval = HZ;
5610 bp->current_interval = HZ;
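/* Editor's note: default interrupt coalescing and housekeeping
 * parameters.  Transmit completions are coalesced up to 20 frames and
 * receive completions up to 6 frames (the *_ticks values appear to be
 * in microseconds), the statistics block is DMA'd roughly once per
 * second, and a 1 Hz driver timer handles periodic maintenance.
 */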
5614 /* Disable WOL support if we are running on a SERDES chip. */
5615 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5616 bp->phy_flags |= PHY_SERDES_FLAG;
5617 bp->flags |= NO_WOL_FLAG;
5618 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5620 reg = REG_RD_IND(bp, bp->shmem_base +
5621 BNX2_SHARED_HW_CFG_CONFIG);
5622 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5623 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5627 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5628 bp->flags |= NO_WOL_FLAG;
5630 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5631 bp->tx_quick_cons_trip_int =
5632 bp->tx_quick_cons_trip;
5633 bp->tx_ticks_int = bp->tx_ticks;
5634 bp->rx_quick_cons_trip_int =
5635 bp->rx_quick_cons_trip;
5636 bp->rx_ticks_int = bp->rx_ticks;
5637 bp->comp_prod_trip_int = bp->comp_prod_trip;
5638 bp->com_ticks_int = bp->com_ticks;
5639 bp->cmd_ticks_int = bp->cmd_ticks;
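/* Editor's note: default link parameters follow.  Speed and flow control
 * are autonegotiated, advertising all fibre speeds on SerDes boards and
 * all copper speeds otherwise.  An NVRAM default-link setting can
 * instead force SerDes ports to 1000 Mb/s full duplex.
 */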
5642 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5643 bp->req_line_speed = 0;
5644 if (bp->phy_flags & PHY_SERDES_FLAG) {
5645 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5647 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5648 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5649 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5651 bp->req_line_speed = bp->line_speed = SPEED_1000;
5652 bp->req_duplex = DUPLEX_FULL;
5656 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5659 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5661 init_timer(&bp->timer);
5662 bp->timer.expires = RUN_AT(bp->timer_interval);
5663 bp->timer.data = (unsigned long) bp;
5664 bp->timer.function = bnx2_timer;
5670 iounmap(bp->regview);
5675 pci_release_regions(pdev);
5678 pci_disable_device(pdev);
5679 pci_set_drvdata(pdev, NULL);
5685 static int __devinit
5686 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5688 static int version_printed = 0;
5689 struct net_device *dev = NULL;
5693 if (version_printed++ == 0)
5694 printk(KERN_INFO "%s", version);
5696 /* dev zeroed in alloc_etherdev */
5697 dev = alloc_etherdev(sizeof(*bp));
5702 rc = bnx2_init_board(pdev, dev);
5708 dev->open = bnx2_open;
5709 dev->hard_start_xmit = bnx2_start_xmit;
5710 dev->stop = bnx2_close;
5711 dev->get_stats = bnx2_get_stats;
5712 dev->set_multicast_list = bnx2_set_rx_mode;
5713 dev->do_ioctl = bnx2_ioctl;
5714 dev->set_mac_address = bnx2_change_mac_addr;
5715 dev->change_mtu = bnx2_change_mtu;
5716 dev->tx_timeout = bnx2_tx_timeout;
5717 dev->watchdog_timeo = TX_TIMEOUT;
5719 dev->vlan_rx_register = bnx2_vlan_rx_register;
5720 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5722 dev->poll = bnx2_poll;
5723 dev->ethtool_ops = &bnx2_ethtool_ops;
5726 bp = netdev_priv(dev);
5728 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5729 dev->poll_controller = poll_bnx2;
5732 if ((rc = register_netdev(dev))) {
5733 printk(KERN_ERR PFX "Cannot register net device\n");
5735 iounmap(bp->regview);
5736 pci_release_regions(pdev);
5737 pci_disable_device(pdev);
5738 pci_set_drvdata(pdev, NULL);
5743 pci_set_drvdata(pdev, dev);
5745 memcpy(dev->dev_addr, bp->mac_addr, 6);
5746 memcpy(dev->perm_addr, bp->mac_addr, 6);
5747 bp->name = board_info[ent->driver_data].name;
5748 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5752 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5753 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5754 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5755 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5760 printk("node addr ");
5761 for (i = 0; i < 6; i++)
5762 printk("%2.2x", dev->dev_addr[i]);
5765 dev->features |= NETIF_F_SG;
5766 if (bp->flags & USING_DAC_FLAG)
5767 dev->features |= NETIF_F_HIGHDMA;
5768 dev->features |= NETIF_F_IP_CSUM;
5770 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5773 dev->features |= NETIF_F_TSO;
5776 netif_carrier_off(bp->dev);
5781 static void __devexit
5782 bnx2_remove_one(struct pci_dev *pdev)
5784 struct net_device *dev = pci_get_drvdata(pdev);
5785 struct bnx2 *bp = netdev_priv(dev);
5787 flush_scheduled_work();
5789 unregister_netdev(dev);
5792 iounmap(bp->regview);
5795 pci_release_regions(pdev);
5796 pci_disable_device(pdev);
5797 pci_set_drvdata(pdev, NULL);
5801 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5803 struct net_device *dev = pci_get_drvdata(pdev);
5804 struct bnx2 *bp = netdev_priv(dev);
5807 if (!netif_running(dev))
5810 flush_scheduled_work();
5811 bnx2_netif_stop(bp);
5812 netif_device_detach(dev);
5813 del_timer_sync(&bp->timer);
5814 if (bp->flags & NO_WOL_FLAG)
5815 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
5817 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5819 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5820 bnx2_reset_chip(bp, reset_code);
5822 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
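/* Editor's note: power management.  Suspend flushes pending work,
 * detaches the netdev and stops the timer, resets the chip with a WOL
 * or non-WOL unload code depending on the WOL configuration and chip
 * support, then drops to the PCI sleep state chosen above.  Resume
 * below reverses this: restore D0, reattach and restart the NIC.
 */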
5827 bnx2_resume(struct pci_dev *pdev)
5829 struct net_device *dev = pci_get_drvdata(pdev);
5830 struct bnx2 *bp = netdev_priv(dev);
5832 if (!netif_running(dev))
5835 bnx2_set_power_state(bp, PCI_D0);
5836 netif_device_attach(dev);
5838 bnx2_netif_start(bp);
5842 static struct pci_driver bnx2_pci_driver = {
5843 .name = DRV_MODULE_NAME,
5844 .id_table = bnx2_pci_tbl,
5845 .probe = bnx2_init_one,
5846 .remove = __devexit_p(bnx2_remove_one),
5847 .suspend = bnx2_suspend,
5848 .resume = bnx2_resume,
5851 static int __init bnx2_init(void)
5853 return pci_module_init(&bnx2_pci_driver);
5856 static void __exit bnx2_cleanup(void)
5858 pci_unregister_driver(&bnx2_pci_driver);
5861 module_init(bnx2_init);
5862 module_exit(bnx2_cleanup);