1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
12 #include <linux/config.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <asm/bitops.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #ifdef NETIF_F_HW_VLAN_TX
40 #include <linux/if_vlan.h>
46 #include <net/checksum.h>
49 #include <linux/workqueue.h>
50 #include <linux/crc32.h>
51 #include <linux/prefetch.h>
52 #include <linux/cache.h>
53 #include <linux/zlib.h>
58 #define DRV_MODULE_NAME "bnx2"
59 #define PFX DRV_MODULE_NAME ": "
60 #define DRV_MODULE_VERSION "1.4.42"
61 #define DRV_MODULE_RELDATE "June 12, 2006"
63 #define RUN_AT(x) (jiffies + (x))
65 /* Time in jiffies before concluding the transmitter is hung. */
66 #define TX_TIMEOUT (5*HZ)
68 static const char version[] __devinitdata =
69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_msi = 0;
78 module_param(disable_msi, int, 0);
79 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91 /* indexed by board_t, above */
94 } board_info[] __devinitdata = {
95 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
96 { "HP NC370T Multifunction Gigabit Server Adapter" },
97 { "HP NC370i Multifunction Gigabit Server Adapter" },
98 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
99 { "HP NC370F Multifunction Gigabit Server Adapter" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
104 static struct pci_device_id bnx2_pci_tbl[] = {
105 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
106 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
107 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
108 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
109 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
111 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
114 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 static struct flash_spec flash_table[] =
125 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
126 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
129 /* Expansion entry 0001 */
130 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
131 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
134 /* Saifun SA25F010 (non-buffered flash) */
135 /* strap, cfg1, & write1 need updates */
136 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
137 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
138 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
139 "Non-buffered flash (128kB)"},
140 /* Saifun SA25F020 (non-buffered flash) */
141 /* strap, cfg1, & write1 need updates */
142 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
143 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
144 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
145 "Non-buffered flash (256kB)"},
146 /* Expansion entry 0100 */
147 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
151 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
152 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
153 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
154 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
155 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
156 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
157 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
160 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
161 /* Saifun SA25F005 (non-buffered flash) */
162 /* strap, cfg1, & write1 need updates */
163 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
164 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
165 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
166 "Non-buffered flash (64kB)"},
168 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
169 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
170 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
172 /* Expansion entry 1001 */
173 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
174 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 /* Expansion entry 1010 */
178 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 /* ATMEL AT45DB011B (buffered flash) */
183 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
184 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
185 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
186 "Buffered flash (128kB)"},
187 /* Expansion entry 1100 */
188 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
189 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
190 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
192 /* Expansion entry 1101 */
193 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 /* Atmel Expansion entry 1110 */
198 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
199 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
200 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1110 (Atmel)"},
202 /* ATMEL AT45DB021B (buffered flash) */
203 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
206 "Buffered flash (256kB)"},
209 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
213 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
215 if (diff > MAX_TX_DESC_CNT)
216 diff = (diff & MAX_TX_DESC_CNT) - 1;
217 return (bp->tx_ring_size - diff);
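/* Example, assuming the usual 4K page (TX_DESC_CNT = 256,
 * MAX_TX_DESC_CNT = 255): prod index 5 and cons index 250 give
 * diff = 5 - 250 = 0xffffff0b as a u32; that exceeds MAX_TX_DESC_CNT,
 * so diff = (diff & 255) - 1 = 10 BDs in flight, the -1 apparently
 * discounting the chain BD skipped at the page boundary.
 */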
221 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
223 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
224 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
228 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
230 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
231 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
235 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
238 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
239 REG_WR(bp, BNX2_CTX_DATA, val);
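/* MII management access. BNX2_EMAC_MDIO_COMM encodes a clause-22
 * management frame: PHY address in bits 25:21, register in bits 20:16,
 * data in bits 15:0. START_BUSY launches the transaction and is polled
 * until the EMAC clears it. When the chip is auto-polling the PHY for
 * link state, the helpers below pause auto-poll first and restore it
 * afterwards so the two users do not collide on the MDIO bus.
 */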
243 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
248 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
249 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
250 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
252 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
253 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
258 val1 = (bp->phy_addr << 21) | (reg << 16) |
259 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
260 BNX2_EMAC_MDIO_COMM_START_BUSY;
261 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
263 for (i = 0; i < 50; i++) {
266 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
267 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
270 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
271 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
277 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
300 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
305 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
306 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
307 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
309 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
310 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
315 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
316 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
317 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
318 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
320 for (i = 0; i < 50; i++) {
323 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
324 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
330 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
335 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
336 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
337 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
339 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
340 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
349 bnx2_disable_int(struct bnx2 *bp)
351 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
352 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
353 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
357 bnx2_enable_int(struct bnx2 *bp)
359 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
360 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
361 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
363 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
364 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
366 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
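/* Two-step ack: the first write re-arms the status block index with
 * the interrupt line still masked, the second unmasks it at the same
 * index. The trailing COAL_NOW asks the host coalescing block to fire
 * immediately if events are already pending (an interpretation,
 * consistent with the COAL_NOW name).
 */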
370 bnx2_disable_int_sync(struct bnx2 *bp)
372 atomic_inc(&bp->intr_sem);
373 bnx2_disable_int(bp);
374 synchronize_irq(bp->pdev->irq);
378 bnx2_netif_stop(struct bnx2 *bp)
380 bnx2_disable_int_sync(bp);
381 if (netif_running(bp->dev)) {
382 netif_poll_disable(bp->dev);
383 netif_tx_disable(bp->dev);
384 bp->dev->trans_start = jiffies; /* prevent tx timeout */
389 bnx2_netif_start(struct bnx2 *bp)
391 if (atomic_dec_and_test(&bp->intr_sem)) {
392 if (netif_running(bp->dev)) {
393 netif_wake_queue(bp->dev);
394 netif_poll_enable(bp->dev);
401 bnx2_free_mem(struct bnx2 *bp)
405 if (bp->status_blk) {
406 pci_free_consistent(bp->pdev, bp->status_stats_size,
407 bp->status_blk, bp->status_blk_mapping);
408 bp->status_blk = NULL;
409 bp->stats_blk = NULL;
411 if (bp->tx_desc_ring) {
412 pci_free_consistent(bp->pdev,
413 sizeof(struct tx_bd) * TX_DESC_CNT,
414 bp->tx_desc_ring, bp->tx_desc_mapping);
415 bp->tx_desc_ring = NULL;
417 kfree(bp->tx_buf_ring);
418 bp->tx_buf_ring = NULL;
419 for (i = 0; i < bp->rx_max_ring; i++) {
420 if (bp->rx_desc_ring[i])
421 pci_free_consistent(bp->pdev,
422 sizeof(struct rx_bd) * RX_DESC_CNT,
424 bp->rx_desc_mapping[i]);
425 bp->rx_desc_ring[i] = NULL;
427 vfree(bp->rx_buf_ring);
428 bp->rx_buf_ring = NULL;
432 bnx2_alloc_mem(struct bnx2 *bp)
434 int i, status_blk_size;
436 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
438 if (bp->tx_buf_ring == NULL)
441 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
442 sizeof(struct tx_bd) *
444 &bp->tx_desc_mapping);
445 if (bp->tx_desc_ring == NULL)
448 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
450 if (bp->rx_buf_ring == NULL)
453 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
456 for (i = 0; i < bp->rx_max_ring; i++) {
457 bp->rx_desc_ring[i] =
458 pci_alloc_consistent(bp->pdev,
459 sizeof(struct rx_bd) * RX_DESC_CNT,
460 &bp->rx_desc_mapping[i]);
461 if (bp->rx_desc_ring[i] == NULL)
466 /* Combine status and statistics blocks into one allocation. */
467 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
468 bp->status_stats_size = status_blk_size +
469 sizeof(struct statistics_block);
471 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
472 &bp->status_blk_mapping);
473 if (bp->status_blk == NULL)
476 memset(bp->status_blk, 0, bp->status_stats_size);
478 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
481 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
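/* Resulting layout of the combined DMA allocation:
 *
 *   status_blk_mapping -> struct status_block (padded out to
 *                         L1_CACHE_ALIGN(sizeof(struct status_block)))
 *   stats_blk_mapping  -> struct statistics_block
 *
 * so the single pci_free_consistent() of status_stats_size in
 * bnx2_free_mem() releases both blocks.
 */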
491 bnx2_report_fw_link(struct bnx2 *bp)
493 u32 fw_link_status = 0;
498 switch (bp->line_speed) {
500 if (bp->duplex == DUPLEX_HALF)
501 fw_link_status = BNX2_LINK_STATUS_10HALF;
503 fw_link_status = BNX2_LINK_STATUS_10FULL;
506 if (bp->duplex == DUPLEX_HALF)
507 fw_link_status = BNX2_LINK_STATUS_100HALF;
509 fw_link_status = BNX2_LINK_STATUS_100FULL;
512 if (bp->duplex == DUPLEX_HALF)
513 fw_link_status = BNX2_LINK_STATUS_1000HALF;
515 fw_link_status = BNX2_LINK_STATUS_1000FULL;
518 if (bp->duplex == DUPLEX_HALF)
519 fw_link_status = BNX2_LINK_STATUS_2500HALF;
521 fw_link_status = BNX2_LINK_STATUS_2500FULL;
525 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
528 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
530 bnx2_read_phy(bp, MII_BMSR, &bmsr);
531 bnx2_read_phy(bp, MII_BMSR, &bmsr);
533 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
534 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
535 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
537 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
541 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
543 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
547 bnx2_report_link(struct bnx2 *bp)
550 netif_carrier_on(bp->dev);
551 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
553 printk("%d Mbps ", bp->line_speed);
555 if (bp->duplex == DUPLEX_FULL)
556 printk("full duplex");
558 printk("half duplex");
561 if (bp->flow_ctrl & FLOW_CTRL_RX) {
562 printk(", receive ");
563 if (bp->flow_ctrl & FLOW_CTRL_TX)
564 printk("& transmit ");
567 printk(", transmit ");
569 printk("flow control ON");
574 netif_carrier_off(bp->dev);
575 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
578 bnx2_report_fw_link(bp);
582 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
584 u32 local_adv, remote_adv;
587 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
588 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
590 if (bp->duplex == DUPLEX_FULL) {
591 bp->flow_ctrl = bp->req_flow_ctrl;
596 if (bp->duplex != DUPLEX_FULL) {
600 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
601 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
604 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
605 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
606 bp->flow_ctrl |= FLOW_CTRL_TX;
607 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
608 bp->flow_ctrl |= FLOW_CTRL_RX;
612 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
613 bnx2_read_phy(bp, MII_LPA, &remote_adv);
615 if (bp->phy_flags & PHY_SERDES_FLAG) {
616 u32 new_local_adv = 0;
617 u32 new_remote_adv = 0;
619 if (local_adv & ADVERTISE_1000XPAUSE)
620 new_local_adv |= ADVERTISE_PAUSE_CAP;
621 if (local_adv & ADVERTISE_1000XPSE_ASYM)
622 new_local_adv |= ADVERTISE_PAUSE_ASYM;
623 if (remote_adv & ADVERTISE_1000XPAUSE)
624 new_remote_adv |= ADVERTISE_PAUSE_CAP;
625 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
626 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
628 local_adv = new_local_adv;
629 remote_adv = new_remote_adv;
632 /* See Table 28B-3 of 802.3ab-1999 spec. */
633 if (local_adv & ADVERTISE_PAUSE_CAP) {
634 if (local_adv & ADVERTISE_PAUSE_ASYM) {
635 if (remote_adv & ADVERTISE_PAUSE_CAP) {
636 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
638 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
639 bp->flow_ctrl = FLOW_CTRL_RX;
643 if (remote_adv & ADVERTISE_PAUSE_CAP) {
644 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
648 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
649 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
650 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
652 bp->flow_ctrl = FLOW_CTRL_TX;
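/* Summary of the resolution above (802.3 Table 28B-3):
 *
 *   Local PAUSE  Local ASYM  Remote PAUSE  Remote ASYM  Result
 *        1           -            1            -        TX and RX
 *        1           1            0            1        RX only
 *        0           1            1            1        TX only
 *   any other combination                               no pause
 */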
658 bnx2_5708s_linkup(struct bnx2 *bp)
663 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
664 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
665 case BCM5708S_1000X_STAT1_SPEED_10:
666 bp->line_speed = SPEED_10;
668 case BCM5708S_1000X_STAT1_SPEED_100:
669 bp->line_speed = SPEED_100;
671 case BCM5708S_1000X_STAT1_SPEED_1G:
672 bp->line_speed = SPEED_1000;
674 case BCM5708S_1000X_STAT1_SPEED_2G5:
675 bp->line_speed = SPEED_2500;
678 if (val & BCM5708S_1000X_STAT1_FD)
679 bp->duplex = DUPLEX_FULL;
681 bp->duplex = DUPLEX_HALF;
687 bnx2_5706s_linkup(struct bnx2 *bp)
689 u32 bmcr, local_adv, remote_adv, common;
692 bp->line_speed = SPEED_1000;
694 bnx2_read_phy(bp, MII_BMCR, &bmcr);
695 if (bmcr & BMCR_FULLDPLX) {
696 bp->duplex = DUPLEX_FULL;
699 bp->duplex = DUPLEX_HALF;
702 if (!(bmcr & BMCR_ANENABLE)) {
706 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
707 bnx2_read_phy(bp, MII_LPA, &remote_adv);
709 common = local_adv & remote_adv;
710 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
712 if (common & ADVERTISE_1000XFULL) {
713 bp->duplex = DUPLEX_FULL;
716 bp->duplex = DUPLEX_HALF;
724 bnx2_copper_linkup(struct bnx2 *bp)
728 bnx2_read_phy(bp, MII_BMCR, &bmcr);
729 if (bmcr & BMCR_ANENABLE) {
730 u32 local_adv, remote_adv, common;
732 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
733 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
735 common = local_adv & (remote_adv >> 2);
736 if (common & ADVERTISE_1000FULL) {
737 bp->line_speed = SPEED_1000;
738 bp->duplex = DUPLEX_FULL;
740 else if (common & ADVERTISE_1000HALF) {
741 bp->line_speed = SPEED_1000;
742 bp->duplex = DUPLEX_HALF;
745 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
746 bnx2_read_phy(bp, MII_LPA, &remote_adv);
748 common = local_adv & remote_adv;
749 if (common & ADVERTISE_100FULL) {
750 bp->line_speed = SPEED_100;
751 bp->duplex = DUPLEX_FULL;
753 else if (common & ADVERTISE_100HALF) {
754 bp->line_speed = SPEED_100;
755 bp->duplex = DUPLEX_HALF;
757 else if (common & ADVERTISE_10FULL) {
758 bp->line_speed = SPEED_10;
759 bp->duplex = DUPLEX_FULL;
761 else if (common & ADVERTISE_10HALF) {
762 bp->line_speed = SPEED_10;
763 bp->duplex = DUPLEX_HALF;
772 if (bmcr & BMCR_SPEED100) {
773 bp->line_speed = SPEED_100;
776 bp->line_speed = SPEED_10;
778 if (bmcr & BMCR_FULLDPLX) {
779 bp->duplex = DUPLEX_FULL;
782 bp->duplex = DUPLEX_HALF;
790 bnx2_set_mac_link(struct bnx2 *bp)
794 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
795 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
796 (bp->duplex == DUPLEX_HALF)) {
797 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
800 /* Configure the EMAC mode register. */
801 val = REG_RD(bp, BNX2_EMAC_MODE);
803 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
804 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
808 switch (bp->line_speed) {
810 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
811 val |= BNX2_EMAC_MODE_PORT_MII_10;
816 val |= BNX2_EMAC_MODE_PORT_MII;
819 val |= BNX2_EMAC_MODE_25G;
822 val |= BNX2_EMAC_MODE_PORT_GMII;
827 val |= BNX2_EMAC_MODE_PORT_GMII;
830 /* Set the MAC to operate in the appropriate duplex mode. */
831 if (bp->duplex == DUPLEX_HALF)
832 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
833 REG_WR(bp, BNX2_EMAC_MODE, val);
835 /* Enable/disable rx PAUSE. */
836 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
838 if (bp->flow_ctrl & FLOW_CTRL_RX)
839 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
840 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
842 /* Enable/disable tx PAUSE. */
843 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
844 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
846 if (bp->flow_ctrl & FLOW_CTRL_TX)
847 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
848 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
850 /* Acknowledge the interrupt. */
851 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
857 bnx2_set_link(struct bnx2 *bp)
862 if (bp->loopback == MAC_LOOPBACK) {
867 link_up = bp->link_up;
869 bnx2_read_phy(bp, MII_BMSR, &bmsr);
870 bnx2_read_phy(bp, MII_BMSR, &bmsr);
872 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
873 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
876 val = REG_RD(bp, BNX2_EMAC_STATUS);
877 if (val & BNX2_EMAC_STATUS_LINK)
878 bmsr |= BMSR_LSTATUS;
880 bmsr &= ~BMSR_LSTATUS;
883 if (bmsr & BMSR_LSTATUS) {
886 if (bp->phy_flags & PHY_SERDES_FLAG) {
887 if (CHIP_NUM(bp) == CHIP_NUM_5706)
888 bnx2_5706s_linkup(bp);
889 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
890 bnx2_5708s_linkup(bp);
893 bnx2_copper_linkup(bp);
895 bnx2_resolve_flow_ctrl(bp);
898 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
899 (bp->autoneg & AUTONEG_SPEED)) {
903 bnx2_read_phy(bp, MII_BMCR, &bmcr);
904 if (!(bmcr & BMCR_ANENABLE)) {
905 bnx2_write_phy(bp, MII_BMCR, bmcr |
909 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
913 if (bp->link_up != link_up) {
914 bnx2_report_link(bp);
917 bnx2_set_mac_link(bp);
923 bnx2_reset_phy(struct bnx2 *bp)
928 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
930 #define PHY_RESET_MAX_WAIT 100
931 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
934 bnx2_read_phy(bp, MII_BMCR, &reg);
935 if (!(reg & BMCR_RESET)) {
940 if (i == PHY_RESET_MAX_WAIT) {
947 bnx2_phy_get_pause_adv(struct bnx2 *bp)
951 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
952 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
954 if (bp->phy_flags & PHY_SERDES_FLAG) {
955 adv = ADVERTISE_1000XPAUSE;
958 adv = ADVERTISE_PAUSE_CAP;
961 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
962 if (bp->phy_flags & PHY_SERDES_FLAG) {
963 adv = ADVERTISE_1000XPSE_ASYM;
966 adv = ADVERTISE_PAUSE_ASYM;
969 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
970 if (bp->phy_flags & PHY_SERDES_FLAG) {
971 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
974 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
981 bnx2_setup_serdes_phy(struct bnx2 *bp)
986 if (!(bp->autoneg & AUTONEG_SPEED)) {
988 int force_link_down = 0;
990 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
991 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
992 if (up1 & BCM5708S_UP1_2G5) {
993 up1 &= ~BCM5708S_UP1_2G5;
994 bnx2_write_phy(bp, BCM5708S_UP1, up1);
999 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1000 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1002 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1003 new_bmcr = bmcr & ~BMCR_ANENABLE;
1004 new_bmcr |= BMCR_SPEED1000;
1005 if (bp->req_duplex == DUPLEX_FULL) {
1006 adv |= ADVERTISE_1000XFULL;
1007 new_bmcr |= BMCR_FULLDPLX;
1010 adv |= ADVERTISE_1000XHALF;
1011 new_bmcr &= ~BMCR_FULLDPLX;
1013 if ((new_bmcr != bmcr) || (force_link_down)) {
1014 /* Force a link down visible on the other side */
1016 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1017 ~(ADVERTISE_1000XFULL |
1018 ADVERTISE_1000XHALF));
1019 bnx2_write_phy(bp, MII_BMCR, bmcr |
1020 BMCR_ANRESTART | BMCR_ANENABLE);
1023 netif_carrier_off(bp->dev);
1024 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1026 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1027 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1032 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1033 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1034 up1 |= BCM5708S_UP1_2G5;
1035 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1038 if (bp->advertising & ADVERTISED_1000baseT_Full)
1039 new_adv |= ADVERTISE_1000XFULL;
1041 new_adv |= bnx2_phy_get_pause_adv(bp);
1043 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1044 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1046 bp->serdes_an_pending = 0;
1047 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1048 /* Force a link down visible on the other side */
1052 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1053 for (i = 0; i < 110; i++) {
1058 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1059 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1061 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1062 /* Speed up link-up time when the link partner
1063 * does not autonegotiate, which is very common
1064 * in blade servers. Some blade servers use
1065 * IPMI for keyboard input and it's important
1066 * to minimize link disruptions. Autoneg. involves
1067 * exchanging base pages plus 3 next pages and
1068 * normally completes in about 120 msec.
1070 bp->current_interval = SERDES_AN_TIMEOUT;
1071 bp->serdes_an_pending = 1;
1072 mod_timer(&bp->timer, jiffies + bp->current_interval);
1079 #define ETHTOOL_ALL_FIBRE_SPEED \
1080 (ADVERTISED_1000baseT_Full)
1082 #define ETHTOOL_ALL_COPPER_SPEED \
1083 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1084 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1085 ADVERTISED_1000baseT_Full)
1087 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1088 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1090 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1093 bnx2_setup_copper_phy(struct bnx2 *bp)
1098 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1100 if (bp->autoneg & AUTONEG_SPEED) {
1101 u32 adv_reg, adv1000_reg;
1102 u32 new_adv_reg = 0;
1103 u32 new_adv1000_reg = 0;
1105 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1106 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1107 ADVERTISE_PAUSE_ASYM);
1109 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1110 adv1000_reg &= PHY_ALL_1000_SPEED;
1112 if (bp->advertising & ADVERTISED_10baseT_Half)
1113 new_adv_reg |= ADVERTISE_10HALF;
1114 if (bp->advertising & ADVERTISED_10baseT_Full)
1115 new_adv_reg |= ADVERTISE_10FULL;
1116 if (bp->advertising & ADVERTISED_100baseT_Half)
1117 new_adv_reg |= ADVERTISE_100HALF;
1118 if (bp->advertising & ADVERTISED_100baseT_Full)
1119 new_adv_reg |= ADVERTISE_100FULL;
1120 if (bp->advertising & ADVERTISED_1000baseT_Full)
1121 new_adv1000_reg |= ADVERTISE_1000FULL;
1123 new_adv_reg |= ADVERTISE_CSMA;
1125 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1127 if ((adv1000_reg != new_adv1000_reg) ||
1128 (adv_reg != new_adv_reg) ||
1129 ((bmcr & BMCR_ANENABLE) == 0)) {
1131 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1132 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1133 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1136 else if (bp->link_up) {
1137 /* Flow ctrl may have changed from auto to forced */
1138 /* or vice-versa. */
1140 bnx2_resolve_flow_ctrl(bp);
1141 bnx2_set_mac_link(bp);
1147 if (bp->req_line_speed == SPEED_100) {
1148 new_bmcr |= BMCR_SPEED100;
1150 if (bp->req_duplex == DUPLEX_FULL) {
1151 new_bmcr |= BMCR_FULLDPLX;
1153 if (new_bmcr != bmcr) {
1157 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1158 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1160 if (bmsr & BMSR_LSTATUS) {
1161 /* Force link down */
1162 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1165 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1166 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1168 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1171 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1173 /* Normally, the new speed is setup after the link has
1174 * gone down and up again. In some cases, link will not go
1175 * down so we need to set up the new speed here.
1177 if (bmsr & BMSR_LSTATUS) {
1178 bp->line_speed = bp->req_line_speed;
1179 bp->duplex = bp->req_duplex;
1180 bnx2_resolve_flow_ctrl(bp);
1181 bnx2_set_mac_link(bp);
1188 bnx2_setup_phy(struct bnx2 *bp)
1190 if (bp->loopback == MAC_LOOPBACK)
1193 if (bp->phy_flags & PHY_SERDES_FLAG) {
1194 return (bnx2_setup_serdes_phy(bp));
1197 return (bnx2_setup_copper_phy(bp));
1202 bnx2_init_5708s_phy(struct bnx2 *bp)
1206 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1207 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1208 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1210 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1211 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1212 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1214 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1215 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1216 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1218 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1219 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1220 val |= BCM5708S_UP1_2G5;
1221 bnx2_write_phy(bp, BCM5708S_UP1, val);
1224 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1225 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1226 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1227 /* increase tx signal amplitude */
1228 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1229 BCM5708S_BLK_ADDR_TX_MISC);
1230 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1231 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1232 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1233 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1236 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1237 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1242 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1243 BNX2_SHARED_HW_CFG_CONFIG);
1244 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1245 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1246 BCM5708S_BLK_ADDR_TX_MISC);
1247 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1248 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1249 BCM5708S_BLK_ADDR_DIG);
1256 bnx2_init_5706s_phy(struct bnx2 *bp)
1258 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1260 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1261 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1264 if (bp->dev->mtu > 1500) {
1267 /* Set extended packet length bit */
1268 bnx2_write_phy(bp, 0x18, 0x7);
1269 bnx2_read_phy(bp, 0x18, &val);
1270 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1272 bnx2_write_phy(bp, 0x1c, 0x6c00);
1273 bnx2_read_phy(bp, 0x1c, &val);
1274 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1279 bnx2_write_phy(bp, 0x18, 0x7);
1280 bnx2_read_phy(bp, 0x18, &val);
1281 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1283 bnx2_write_phy(bp, 0x1c, 0x6c00);
1284 bnx2_read_phy(bp, 0x1c, &val);
1285 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1292 bnx2_init_copper_phy(struct bnx2 *bp)
1296 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1298 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1299 bnx2_write_phy(bp, 0x18, 0x0c00);
1300 bnx2_write_phy(bp, 0x17, 0x000a);
1301 bnx2_write_phy(bp, 0x15, 0x310b);
1302 bnx2_write_phy(bp, 0x17, 0x201f);
1303 bnx2_write_phy(bp, 0x15, 0x9506);
1304 bnx2_write_phy(bp, 0x17, 0x401f);
1305 bnx2_write_phy(bp, 0x15, 0x14e2);
1306 bnx2_write_phy(bp, 0x18, 0x0400);
1309 if (bp->dev->mtu > 1500) {
1310 /* Set extended packet length bit */
1311 bnx2_write_phy(bp, 0x18, 0x7);
1312 bnx2_read_phy(bp, 0x18, &val);
1313 bnx2_write_phy(bp, 0x18, val | 0x4000);
1315 bnx2_read_phy(bp, 0x10, &val);
1316 bnx2_write_phy(bp, 0x10, val | 0x1);
1319 bnx2_write_phy(bp, 0x18, 0x7);
1320 bnx2_read_phy(bp, 0x18, &val);
1321 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1323 bnx2_read_phy(bp, 0x10, &val);
1324 bnx2_write_phy(bp, 0x10, val & ~0x1);
1327 /* ethernet@wirespeed */
1328 bnx2_write_phy(bp, 0x18, 0x7007);
1329 bnx2_read_phy(bp, 0x18, &val);
1330 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
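/* The 0x18 accesses above use the Broadcom shadow-register window:
 * writing 0x7007 selects shadow 7 (misc control) for read-back, and
 * the write-back sets bit 15 (shadow write enable) and bit 4, which
 * turns on ethernet@wirespeed. This is the common BCM54xx convention;
 * the exact shadow map is not spelled out here.
 */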
1336 bnx2_init_phy(struct bnx2 *bp)
1341 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1342 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1344 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1348 bnx2_read_phy(bp, MII_PHYSID1, &val);
1349 bp->phy_id = val << 16;
1350 bnx2_read_phy(bp, MII_PHYSID2, &val);
1351 bp->phy_id |= val & 0xffff;
1353 if (bp->phy_flags & PHY_SERDES_FLAG) {
1354 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1355 rc = bnx2_init_5706s_phy(bp);
1356 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1357 rc = bnx2_init_5708s_phy(bp);
1360 rc = bnx2_init_copper_phy(bp);
1369 bnx2_set_mac_loopback(struct bnx2 *bp)
1373 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1374 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1375 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1376 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1381 static int bnx2_test_link(struct bnx2 *);
1384 bnx2_set_phy_loopback(struct bnx2 *bp)
1389 spin_lock_bh(&bp->phy_lock);
1390 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1392 spin_unlock_bh(&bp->phy_lock);
1396 for (i = 0; i < 10; i++) {
1397 if (bnx2_test_link(bp) == 0)
1402 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1403 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1404 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1405 BNX2_EMAC_MODE_25G);
1407 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1408 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1414 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1420 msg_data |= bp->fw_wr_seq;
1422 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1424 /* wait for an acknowledgement. */
1425 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1428 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1430 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1433 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1436 /* If we timed out, inform the firmware that this is the case. */
1437 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1439 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1442 msg_data &= ~BNX2_DRV_MSG_CODE;
1443 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1445 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1450 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1457 bnx2_init_context(struct bnx2 *bp)
1463 u32 vcid_addr, pcid_addr, offset;
1467 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1470 vcid_addr = GET_PCID_ADDR(vcid);
1472 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1477 pcid_addr = GET_PCID_ADDR(new_vcid);
1480 vcid_addr = GET_CID_ADDR(vcid);
1481 pcid_addr = vcid_addr;
1484 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1485 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1487 /* Zero out the context. */
1488 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1489 CTX_WR(bp, 0x00, offset, 0);
1492 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1493 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1498 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1504 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1505 if (good_mbuf == NULL) {
1506 printk(KERN_ERR PFX "Failed to allocate memory in "
1507 "bnx2_alloc_bad_rbuf\n");
1511 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1512 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1516 /* Allocate a bunch of mbufs and save the good ones in an array. */
1517 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1518 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1519 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1521 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1523 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1525 /* The addresses with Bit 9 set are bad memory blocks. */
1526 if (!(val & (1 << 9))) {
1527 good_mbuf[good_mbuf_cnt] = (u16) val;
1531 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1534 /* Free the good ones back to the mbuf pool thus discarding
1535 * all the bad ones. */
1536 while (good_mbuf_cnt) {
1539 val = good_mbuf[good_mbuf_cnt];
1540 val = (val << 9) | val | 1;
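/* Re-encode the saved 9-bit handle into the free command: the handle
 * is replicated into bits 17:9 and 8:0 with bit 0 set, e.g. handle
 * 0x012 becomes (0x012 << 9) | 0x012 | 1 = 0x2413 -- apparently the
 * format BNX2_RBUF_FW_BUF_FREE expects.
 */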
1542 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1549 bnx2_set_mac_addr(struct bnx2 *bp)
1552 u8 *mac_addr = bp->dev->dev_addr;
1554 val = (mac_addr[0] << 8) | mac_addr[1];
1556 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1558 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1559 (mac_addr[4] << 8) | mac_addr[5];
1561 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
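/* Example: for MAC address 00:10:18:2a:3b:4c, MATCH0 holds 0x0010
 * (the two high-order bytes) and MATCH1 holds 0x182a3b4c (the
 * remaining four).
 */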
1565 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1567 struct sk_buff *skb;
1568 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1570 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1571 unsigned long align;
1573 skb = dev_alloc_skb(bp->rx_buf_size);
1578 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1579 skb_reserve(skb, 8 - align);
1583 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1584 PCI_DMA_FROMDEVICE);
1587 pci_unmap_addr_set(rx_buf, mapping, mapping);
1589 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1590 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
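/* The DMA address is split into 32-bit high/low halves to fill the
 * two BD fields; on hosts with a 32-bit dma_addr_t the high half is
 * simply zero.
 */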
1592 bp->rx_prod_bseq += bp->rx_buf_use_size;
1598 bnx2_phy_int(struct bnx2 *bp)
1600 u32 new_link_state, old_link_state;
1602 new_link_state = bp->status_blk->status_attn_bits &
1603 STATUS_ATTN_BITS_LINK_STATE;
1604 old_link_state = bp->status_blk->status_attn_bits_ack &
1605 STATUS_ATTN_BITS_LINK_STATE;
1606 if (new_link_state != old_link_state) {
1607 if (new_link_state) {
1608 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1609 STATUS_ATTN_BITS_LINK_STATE);
1612 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1613 STATUS_ATTN_BITS_LINK_STATE);
1620 bnx2_tx_int(struct bnx2 *bp)
1622 struct status_block *sblk = bp->status_blk;
1623 u16 hw_cons, sw_cons, sw_ring_cons;
1626 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1627 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1630 sw_cons = bp->tx_cons;
1632 while (sw_cons != hw_cons) {
1633 struct sw_bd *tx_buf;
1634 struct sk_buff *skb;
1637 sw_ring_cons = TX_RING_IDX(sw_cons);
1639 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1642 /* partial BD completions possible with TSO packets */
1643 if (skb_shinfo(skb)->tso_size) {
1644 u16 last_idx, last_ring_idx;
1646 last_idx = sw_cons +
1647 skb_shinfo(skb)->nr_frags + 1;
1648 last_ring_idx = sw_ring_cons +
1649 skb_shinfo(skb)->nr_frags + 1;
1650 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1653 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1658 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1659 skb_headlen(skb), PCI_DMA_TODEVICE);
1662 last = skb_shinfo(skb)->nr_frags;
1664 for (i = 0; i < last; i++) {
1665 sw_cons = NEXT_TX_BD(sw_cons);
1667 pci_unmap_page(bp->pdev,
1669 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1671 skb_shinfo(skb)->frags[i].size,
1675 sw_cons = NEXT_TX_BD(sw_cons);
1677 tx_free_bd += last + 1;
1679 dev_kfree_skb_irq(skb);
1681 hw_cons = bp->hw_tx_cons =
1682 sblk->status_tx_quick_consumer_index0;
1684 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1689 bp->tx_cons = sw_cons;
1691 if (unlikely(netif_queue_stopped(bp->dev))) {
1692 spin_lock(&bp->tx_lock);
1693 if ((netif_queue_stopped(bp->dev)) &&
1694 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1696 netif_wake_queue(bp->dev);
1698 spin_unlock(&bp->tx_lock);
1703 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1706 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1707 struct rx_bd *cons_bd, *prod_bd;
1709 cons_rx_buf = &bp->rx_buf_ring[cons];
1710 prod_rx_buf = &bp->rx_buf_ring[prod];
1712 pci_dma_sync_single_for_device(bp->pdev,
1713 pci_unmap_addr(cons_rx_buf, mapping),
1714 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1716 bp->rx_prod_bseq += bp->rx_buf_use_size;
1718 prod_rx_buf->skb = skb;
1723 pci_unmap_addr_set(prod_rx_buf, mapping,
1724 pci_unmap_addr(cons_rx_buf, mapping));
1726 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1727 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1728 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1729 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1733 bnx2_rx_int(struct bnx2 *bp, int budget)
1735 struct status_block *sblk = bp->status_blk;
1736 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1737 struct l2_fhdr *rx_hdr;
1740 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1741 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1744 sw_cons = bp->rx_cons;
1745 sw_prod = bp->rx_prod;
1747 /* Memory barrier necessary as speculative reads of the rx
1748 * buffer can be ahead of the index in the status block
1751 while (sw_cons != hw_cons) {
1754 struct sw_bd *rx_buf;
1755 struct sk_buff *skb;
1756 dma_addr_t dma_addr;
1758 sw_ring_cons = RX_RING_IDX(sw_cons);
1759 sw_ring_prod = RX_RING_IDX(sw_prod);
1761 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1766 dma_addr = pci_unmap_addr(rx_buf, mapping);
1768 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1769 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1771 rx_hdr = (struct l2_fhdr *) skb->data;
1772 len = rx_hdr->l2_fhdr_pkt_len - 4;
1774 if ((status = rx_hdr->l2_fhdr_status) &
1775 (L2_FHDR_ERRORS_BAD_CRC |
1776 L2_FHDR_ERRORS_PHY_DECODE |
1777 L2_FHDR_ERRORS_ALIGNMENT |
1778 L2_FHDR_ERRORS_TOO_SHORT |
1779 L2_FHDR_ERRORS_GIANT_FRAME)) {
1784 /* Since we don't have a jumbo ring, copy small packets
1787 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1788 struct sk_buff *new_skb;
1790 new_skb = dev_alloc_skb(len + 2);
1791 if (new_skb == NULL)
1795 memcpy(new_skb->data,
1796 skb->data + bp->rx_offset - 2,
1799 skb_reserve(new_skb, 2);
1800 skb_put(new_skb, len);
1801 new_skb->dev = bp->dev;
1803 bnx2_reuse_rx_skb(bp, skb,
1804 sw_ring_cons, sw_ring_prod);
1808 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1809 pci_unmap_single(bp->pdev, dma_addr,
1810 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1812 skb_reserve(skb, bp->rx_offset);
1817 bnx2_reuse_rx_skb(bp, skb,
1818 sw_ring_cons, sw_ring_prod);
1822 skb->protocol = eth_type_trans(skb, bp->dev);
1824 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1825 (ntohs(skb->protocol) != 0x8100)) {
1827 dev_kfree_skb_irq(skb);
1832 skb->ip_summed = CHECKSUM_NONE;
1834 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1835 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1837 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1838 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1839 skb->ip_summed = CHECKSUM_UNNECESSARY;
1843 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1844 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1845 rx_hdr->l2_fhdr_vlan_tag);
1849 netif_receive_skb(skb);
1851 bp->dev->last_rx = jiffies;
1855 sw_cons = NEXT_RX_BD(sw_cons);
1856 sw_prod = NEXT_RX_BD(sw_prod);
1858 if (rx_pkt == budget)
1861 /* Refresh hw_cons to see if there is new work */
1862 if (sw_cons == hw_cons) {
1863 hw_cons = bp->hw_rx_cons =
1864 sblk->status_rx_quick_consumer_index0;
1865 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1870 bp->rx_cons = sw_cons;
1871 bp->rx_prod = sw_prod;
1873 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1875 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1883 /* MSI ISR - The only difference between this and the INTx ISR
1884 * is that the MSI interrupt is always serviced.
1887 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1889 struct net_device *dev = dev_instance;
1890 struct bnx2 *bp = netdev_priv(dev);
1892 prefetch(bp->status_blk);
1893 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1894 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1895 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1897 /* Return here if interrupt is disabled. */
1898 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1901 netif_rx_schedule(dev);
1907 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1909 struct net_device *dev = dev_instance;
1910 struct bnx2 *bp = netdev_priv(dev);
1912 /* When using INTx, it is possible for the interrupt to arrive
1913 * at the CPU before the status block posted prior to the
1914 * interrupt. Reading a register will flush the status block.
1915 * When using MSI, the MSI message will always complete after
1916 * the status block write.
1918 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1919 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1920 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1923 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1924 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1925 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1927 /* Return here if interrupt is shared and is disabled. */
1928 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1931 netif_rx_schedule(dev);
1937 bnx2_has_work(struct bnx2 *bp)
1939 struct status_block *sblk = bp->status_blk;
1941 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1942 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1945 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1953 bnx2_poll(struct net_device *dev, int *budget)
1955 struct bnx2 *bp = netdev_priv(dev);
1957 if ((bp->status_blk->status_attn_bits &
1958 STATUS_ATTN_BITS_LINK_STATE) !=
1959 (bp->status_blk->status_attn_bits_ack &
1960 STATUS_ATTN_BITS_LINK_STATE)) {
1962 spin_lock(&bp->phy_lock);
1964 spin_unlock(&bp->phy_lock);
1966 /* This is needed to take care of transient status
1967 * during link changes.
1969 REG_WR(bp, BNX2_HC_COMMAND,
1970 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1971 REG_RD(bp, BNX2_HC_COMMAND);
1974 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1977 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1978 int orig_budget = *budget;
1981 if (orig_budget > dev->quota)
1982 orig_budget = dev->quota;
1984 work_done = bnx2_rx_int(bp, orig_budget);
1985 *budget -= work_done;
1986 dev->quota -= work_done;
1989 bp->last_status_idx = bp->status_blk->status_idx;
1992 if (!bnx2_has_work(bp)) {
1993 netif_rx_complete(dev);
1994 if (likely(bp->flags & USING_MSI_FLAG)) {
1995 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1996 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1997 bp->last_status_idx);
2000 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2001 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2002 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2003 bp->last_status_idx);
2005 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2006 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2007 bp->last_status_idx);
2014 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2015 * from set_multicast.
2018 bnx2_set_rx_mode(struct net_device *dev)
2020 struct bnx2 *bp = netdev_priv(dev);
2021 u32 rx_mode, sort_mode;
2024 spin_lock_bh(&bp->phy_lock);
2026 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2027 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2028 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2030 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2031 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2033 if (!(bp->flags & ASF_ENABLE_FLAG))
2034 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2036 if (dev->flags & IFF_PROMISC) {
2037 /* Promiscuous mode. */
2038 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2039 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
2041 else if (dev->flags & IFF_ALLMULTI) {
2042 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2043 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2046 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2049 /* Accept one or more multicast(s). */
2050 struct dev_mc_list *mclist;
2051 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2056 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2058 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2059 i++, mclist = mclist->next) {
2061 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2063 regidx = (bit & 0xe0) >> 5;
2065 mc_filter[regidx] |= (1 << bit);
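/* bit is an 8-bit hash derived from the CRC: bits 7:5 select one of
 * the NUM_MC_HASH_REGISTERS (8) filter registers and, assuming the
 * usual masking down to 5 bits, bits 4:0 select the bit within that
 * 32-bit register -- a 256-bucket imperfect filter. E.g. a hash of
 * 0x6b lands in register 3, bit 11.
 */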
2068 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2069 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2073 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2076 if (rx_mode != bp->rx_mode) {
2077 bp->rx_mode = rx_mode;
2078 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2081 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2082 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2083 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2085 spin_unlock_bh(&bp->phy_lock);
2088 #define FW_BUF_SIZE 0x8000
2091 bnx2_gunzip_init(struct bnx2 *bp)
2093 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2096 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2099 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2100 if (bp->strm->workspace == NULL)
2110 vfree(bp->gunzip_buf);
2111 bp->gunzip_buf = NULL;
2114 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2115 "uncompression.\n", bp->dev->name);
2120 bnx2_gunzip_end(struct bnx2 *bp)
2122 kfree(bp->strm->workspace);
2127 if (bp->gunzip_buf) {
2128 vfree(bp->gunzip_buf);
2129 bp->gunzip_buf = NULL;
2134 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2138 /* check gzip header */
2139 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2145 if (zbuf[3] & FNAME)
2146 while ((zbuf[n++] != 0) && (n < len));
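/* Gzip framing: bytes 0-1 are the 0x1f 0x8b magic, byte 2 the method
 * (Z_DEFLATED == 8) and byte 3 the flag bits; when FNAME is set, a
 * NUL-terminated original-file-name field follows the fixed header
 * and is skipped here. What remains is a raw deflate stream, hence
 * the negative window size (-MAX_WBITS) passed to zlib_inflateInit2()
 * below to suppress zlib/gzip header processing.
 */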
2148 bp->strm->next_in = zbuf + n;
2149 bp->strm->avail_in = len - n;
2150 bp->strm->next_out = bp->gunzip_buf;
2151 bp->strm->avail_out = FW_BUF_SIZE;
2153 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2157 rc = zlib_inflate(bp->strm, Z_FINISH);
2159 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2160 *outbuf = bp->gunzip_buf;
2162 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2163 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2164 bp->dev->name, bp->strm->msg);
2166 zlib_inflateEnd(bp->strm);
2168 if (rc == Z_STREAM_END)
2175 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2182 for (i = 0; i < rv2p_code_len; i += 8) {
2183 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2185 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2188 if (rv2p_proc == RV2P_PROC1) {
2189 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2190 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2193 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2194 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2198 /* Reset the processor, un-stall is done later. */
2199 if (rv2p_proc == RV2P_PROC1) {
2200 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2203 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
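/* Each RV2P instruction is 64 bits wide: the two halves are staged
 * through INSTR_HIGH/INSTR_LOW, and the write to PROC{1,2}_ADDR_CMD
 * with the RDWR bit plus the instruction index (i / 8, since i counts
 * bytes) commits the pair to the processor's instruction memory. Both
 * engines stay in reset until un-stalled later in initialization.
 */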
2208 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2214 val = REG_RD_IND(bp, cpu_reg->mode);
2215 val |= cpu_reg->mode_value_halt;
2216 REG_WR_IND(bp, cpu_reg->mode, val);
2217 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2219 /* Load the Text area. */
2220 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2224 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2225 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2229 /* Load the Data area. */
2230 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2234 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2235 REG_WR_IND(bp, offset, fw->data[j]);
2239 /* Load the SBSS area. */
2240 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2244 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2245 REG_WR_IND(bp, offset, fw->sbss[j]);
2249 /* Load the BSS area. */
2250 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2254 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2255 REG_WR_IND(bp, offset, fw->bss[j]);
2259 /* Load the Read-Only area. */
2260 offset = cpu_reg->spad_base +
2261 (fw->rodata_addr - cpu_reg->mips_view_base);
2265 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2266 REG_WR_IND(bp, offset, fw->rodata[j]);
2270 /* Clear the pre-fetch instruction. */
2271 REG_WR_IND(bp, cpu_reg->inst, 0);
2272 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2274 /* Start the CPU. */
2275 val = REG_RD_IND(bp, cpu_reg->mode);
2276 val &= ~cpu_reg->mode_value_halt;
2277 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2278 REG_WR_IND(bp, cpu_reg->mode, val);
2282 bnx2_init_cpus(struct bnx2 *bp)
2284 struct cpu_reg cpu_reg;
2290 if ((rc = bnx2_gunzip_init(bp)) != 0)
2293 /* Initialize the RV2P processor. */
2294 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2299 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2301 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2306 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2308 /* Initialize the RX Processor. */
2309 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2310 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2311 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2312 cpu_reg.state = BNX2_RXP_CPU_STATE;
2313 cpu_reg.state_value_clear = 0xffffff;
2314 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2315 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2316 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2317 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2318 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2319 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2320 cpu_reg.mips_view_base = 0x8000000;
2322 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2323 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2324 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2325 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2327 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2328 fw.text_len = bnx2_RXP_b06FwTextLen;
2331 rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
2338 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2339 fw.data_len = bnx2_RXP_b06FwDataLen;
2341 fw.data = bnx2_RXP_b06FwData;
2343 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2344 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2346 fw.sbss = bnx2_RXP_b06FwSbss;
2348 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2349 fw.bss_len = bnx2_RXP_b06FwBssLen;
2351 fw.bss = bnx2_RXP_b06FwBss;
2353 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2354 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2355 fw.rodata_index = 0;
2356 fw.rodata = bnx2_RXP_b06FwRodata;
2358 load_cpu_fw(bp, &cpu_reg, &fw);
2360 /* Initialize the TX Processor. */
2361 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2362 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2363 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2364 cpu_reg.state = BNX2_TXP_CPU_STATE;
2365 cpu_reg.state_value_clear = 0xffffff;
2366 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2367 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2368 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2369 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2370 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2371 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2372 cpu_reg.mips_view_base = 0x8000000;
2374 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2375 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2376 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2377 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2379 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2380 fw.text_len = bnx2_TXP_b06FwTextLen;
2383 rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
2390 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2391 fw.data_len = bnx2_TXP_b06FwDataLen;
2393 fw.data = bnx2_TXP_b06FwData;
2395 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2396 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2398 fw.sbss = bnx2_TXP_b06FwSbss;
2400 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2401 fw.bss_len = bnx2_TXP_b06FwBssLen;
2403 fw.bss = bnx2_TXP_b06FwBss;
2405 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2406 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2407 fw.rodata_index = 0;
2408 fw.rodata = bnx2_TXP_b06FwRodata;
2410 load_cpu_fw(bp, &cpu_reg, &fw);
2412 /* Initialize the TX Patch-up Processor. */
2413 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2414 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2415 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2416 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2417 cpu_reg.state_value_clear = 0xffffff;
2418 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2419 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2420 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2421 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2422 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2423 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2424 cpu_reg.mips_view_base = 0x8000000;
2426 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2427 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2428 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2429 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2431 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2432 fw.text_len = bnx2_TPAT_b06FwTextLen;
2435 rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
2442 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2443 fw.data_len = bnx2_TPAT_b06FwDataLen;
2445 fw.data = bnx2_TPAT_b06FwData;
2447 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2448 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2450 fw.sbss = bnx2_TPAT_b06FwSbss;
2452 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2453 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2455 fw.bss = bnx2_TPAT_b06FwBss;
2457 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2458 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2459 fw.rodata_index = 0;
2460 fw.rodata = bnx2_TPAT_b06FwRodata;
2462 load_cpu_fw(bp, &cpu_reg, &fw);
2464 /* Initialize the Completion Processor. */
2465 cpu_reg.mode = BNX2_COM_CPU_MODE;
2466 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2467 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2468 cpu_reg.state = BNX2_COM_CPU_STATE;
2469 cpu_reg.state_value_clear = 0xffffff;
2470 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2471 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2472 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2473 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2474 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2475 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2476 cpu_reg.mips_view_base = 0x8000000;
2478 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2479 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2480 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2481 fw.start_addr = bnx2_COM_b06FwStartAddr;
2483 fw.text_addr = bnx2_COM_b06FwTextAddr;
2484 fw.text_len = bnx2_COM_b06FwTextLen;
2487 rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
2494 fw.data_addr = bnx2_COM_b06FwDataAddr;
2495 fw.data_len = bnx2_COM_b06FwDataLen;
2497 fw.data = bnx2_COM_b06FwData;
2499 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2500 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2502 fw.sbss = bnx2_COM_b06FwSbss;
2504 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2505 fw.bss_len = bnx2_COM_b06FwBssLen;
2507 fw.bss = bnx2_COM_b06FwBss;
2509 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2510 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2511 fw.rodata_index = 0;
2512 fw.rodata = bnx2_COM_b06FwRodata;
2514 load_cpu_fw(bp, &cpu_reg, &fw);
2517 bnx2_gunzip_end(bp);
2522 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2526 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2532 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2533 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2534 PCI_PM_CTRL_PME_STATUS);
2536 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2537 			/* delay required during transition out of D3hot */
			msleep(20);
2540 val = REG_RD(bp, BNX2_EMAC_MODE);
2541 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2542 val &= ~BNX2_EMAC_MODE_MPKT;
2543 REG_WR(bp, BNX2_EMAC_MODE, val);
2545 val = REG_RD(bp, BNX2_RPM_CONFIG);
2546 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2547 REG_WR(bp, BNX2_RPM_CONFIG, val);
2558 autoneg = bp->autoneg;
2559 advertising = bp->advertising;
2561 bp->autoneg = AUTONEG_SPEED;
2562 bp->advertising = ADVERTISED_10baseT_Half |
2563 ADVERTISED_10baseT_Full |
2564 ADVERTISED_100baseT_Half |
2565 			ADVERTISED_100baseT_Full |
			ADVERTISED_Autoneg;
2568 bnx2_setup_copper_phy(bp);
2570 bp->autoneg = autoneg;
2571 bp->advertising = advertising;
2573 bnx2_set_mac_addr(bp);
2575 val = REG_RD(bp, BNX2_EMAC_MODE);
2577 /* Enable port mode. */
2578 val &= ~BNX2_EMAC_MODE_PORT;
2579 val |= BNX2_EMAC_MODE_PORT_MII |
2580 BNX2_EMAC_MODE_MPKT_RCVD |
2581 BNX2_EMAC_MODE_ACPI_RCVD |
2582 BNX2_EMAC_MODE_MPKT;
2584 REG_WR(bp, BNX2_EMAC_MODE, val);
2586 /* receive all multicast */
2587 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2588 			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
2591 REG_WR(bp, BNX2_EMAC_RX_MODE,
2592 BNX2_EMAC_RX_MODE_SORT_MODE);
2594 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2595 BNX2_RPM_SORT_USER0_MC_EN;
2596 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2597 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2598 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2599 BNX2_RPM_SORT_USER0_ENA);
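/* Note the three-step sequence above: BNX2_RPM_SORT_USER0 is cleared,
 * written with the broadcast/multicast enables, and only then written
 * once more with the same value plus the ENA bit -- the enable appears
 * to latch whatever sort value was programmed last.
 */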
2601 /* Need to enable EMAC and RPM for WOL. */
2602 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2603 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2604 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2605 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2607 val = REG_RD(bp, BNX2_RPM_CONFIG);
2608 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2609 REG_WR(bp, BNX2_RPM_CONFIG, val);
2611 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2614 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2617 if (!(bp->flags & NO_WOL_FLAG))
2618 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2620 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2621 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2622 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2631 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2633 	pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			      pmcsr);
2636 /* No more memory access after this point until
2637 	 * device is brought back to D0.
	 */
2649 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2654 /* Request access to the flash interface. */
2655 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2656 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2657 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2658 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2664 	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;
2671 bnx2_release_nvram_lock(struct bnx2 *bp)
2676 /* Relinquish nvram interface. */
2677 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2679 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2680 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2681 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2687 	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;
2695 bnx2_enable_nvram_write(struct bnx2 *bp)
2699 val = REG_RD(bp, BNX2_MISC_CFG);
2700 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2702 if (!bp->flash_info->buffered) {
2705 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2706 REG_WR(bp, BNX2_NVM_COMMAND,
2707 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2709 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2712 val = REG_RD(bp, BNX2_NVM_COMMAND);
2713 if (val & BNX2_NVM_COMMAND_DONE)
2717 		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
2724 bnx2_disable_nvram_write(struct bnx2 *bp)
2728 val = REG_RD(bp, BNX2_MISC_CFG);
2729 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2734 bnx2_enable_nvram_access(struct bnx2 *bp)
2738 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2739 /* Enable both bits, even on read. */
2740 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2741 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2745 bnx2_disable_nvram_access(struct bnx2 *bp)
2749 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2750 /* Disable both bits, even after read. */
2751 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2752 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2753 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2757 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2762 if (bp->flash_info->buffered)
2763 		/* Buffered flash, no erase needed */
		return 0;
2766 /* Build an erase command */
2767 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2768 BNX2_NVM_COMMAND_DOIT;
2770 /* Need to clear DONE bit separately. */
2771 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2773 	/* Address of the NVRAM page to erase. */
2774 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2776 /* Issue an erase command. */
2777 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2779 /* Wait for completion. */
2780 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2785 val = REG_RD(bp, BNX2_NVM_COMMAND);
2786 if (val & BNX2_NVM_COMMAND_DONE)
2790 	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;
2797 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2802 /* Build the command word. */
2803 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2805 	/* Translate the linear offset into the buffered flash's
	 * page/offset addressing. */
2806 if (bp->flash_info->buffered) {
2807 offset = ((offset / bp->flash_info->page_size) <<
2808 bp->flash_info->page_bits) +
2809 (offset % bp->flash_info->page_size);
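	/* Worked example of the translation above, assuming a typical
	 * buffered part with page_size = 264 and page_bits = 9: a linear
	 * offset of 1000 lands in page 3 (1000 / 264) at byte 208
	 * (1000 % 264), so the device address becomes (3 << 9) + 208 =
	 * 1744.  Pages occupy power-of-two address windows even though
	 * the page itself is 264 bytes.
	 */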
2812 /* Need to clear DONE bit separately. */
2813 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2815 /* Address of the NVRAM to read from. */
2816 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2818 /* Issue a read command. */
2819 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2821 /* Wait for completion. */
2822 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2827 val = REG_RD(bp, BNX2_NVM_COMMAND);
2828 if (val & BNX2_NVM_COMMAND_DONE) {
2829 val = REG_RD(bp, BNX2_NVM_READ);
2831 val = be32_to_cpu(val);
2832 memcpy(ret_val, &val, 4);
2836 	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;
2844 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2849 /* Build the command word. */
2850 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2852 	/* Translate the linear offset into the buffered flash's
	 * page/offset addressing. */
2853 if (bp->flash_info->buffered) {
2854 offset = ((offset / bp->flash_info->page_size) <<
2855 bp->flash_info->page_bits) +
2856 (offset % bp->flash_info->page_size);
2859 /* Need to clear DONE bit separately. */
2860 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2862 memcpy(&val32, val, 4);
2863 val32 = cpu_to_be32(val32);
2865 /* Write the data. */
2866 REG_WR(bp, BNX2_NVM_WRITE, val32);
2868 /* Address of the NVRAM to write to. */
2869 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2871 /* Issue the write command. */
2872 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2874 /* Wait for completion. */
2875 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2878 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2881 	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;
2888 bnx2_init_nvram(struct bnx2 *bp)
2891 int j, entry_count, rc;
2892 struct flash_spec *flash;
2894 /* Determine the selected interface. */
2895 val = REG_RD(bp, BNX2_NVM_CFG1);
2897 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2900 if (val & 0x40000000) {
2902 /* Flash interface has been reconfigured */
2903 for (j = 0, flash = &flash_table[0]; j < entry_count;
2905 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2906 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2907 bp->flash_info = flash;
2914 		/* Not yet reconfigured */
2916 if (val & (1 << 23))
2917 mask = FLASH_BACKUP_STRAP_MASK;
		else
2919 			mask = FLASH_STRAP_MASK;
2921 for (j = 0, flash = &flash_table[0]; j < entry_count;
2924 if ((val & mask) == (flash->strapping & mask)) {
2925 bp->flash_info = flash;
2927 /* Request access to the flash interface. */
2928 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;
2931 /* Enable access to flash interface */
2932 bnx2_enable_nvram_access(bp);
2934 /* Reconfigure the flash interface */
2935 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2936 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2937 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2938 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2940 /* Disable access to flash interface */
2941 bnx2_disable_nvram_access(bp);
2942 bnx2_release_nvram_lock(bp);
2947 } /* if (val & 0x40000000) */
2949 if (j == entry_count) {
2950 bp->flash_info = NULL;
2951 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2955 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2956 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
2958 		bp->flash_size = val;
	else
2960 		bp->flash_size = bp->flash_info->total_size;
2966 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2970 u32 cmd_flags, offset32, len32, extra;
2975 /* Request access to the flash interface. */
2976 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;
2979 /* Enable access to flash interface */
2980 bnx2_enable_nvram_access(bp);
2993 pre_len = 4 - (offset & 3);
2995 if (pre_len >= len32) {
2997 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2998 BNX2_NVM_COMMAND_LAST;
3001 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3004 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3009 memcpy(ret_buf, buf + (offset & 3), pre_len);
3016 extra = 4 - (len32 & 3);
3017 len32 = (len32 + 4) & ~3;
3024 cmd_flags = BNX2_NVM_COMMAND_LAST;
3026 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3027 BNX2_NVM_COMMAND_LAST;
3029 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3031 memcpy(ret_buf, buf, 4 - extra);
3033 else if (len32 > 0) {
3036 /* Read the first word. */
3040 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3042 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3044 /* Advance to the next dword. */
3049 while (len32 > 4 && rc == 0) {
3050 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3052 /* Advance to the next dword. */
3061 cmd_flags = BNX2_NVM_COMMAND_LAST;
3062 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3064 memcpy(ret_buf, buf, 4 - extra);
3067 /* Disable access to flash interface */
3068 bnx2_disable_nvram_access(bp);
3070 bnx2_release_nvram_lock(bp);
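	/* Worked example of the alignment handling above: a 5-byte read
	 * at offset 6 is served entirely with dword accesses.  pre_len =
	 * 4 - (6 & 3) = 2 bytes come from the tail of the dword at
	 * offset 4; the remaining len32 = 3 rounds up to one more dword
	 * at offset 8, of which extra = 1 trailing byte is discarded.
	 * The flash interface is never asked for anything but whole,
	 * naturally aligned dwords.
	 */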
3076 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3079 u32 written, offset32, len32;
3080 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3082 int align_start, align_end;
3087 align_start = align_end = 0;
3089 if ((align_start = (offset32 & 3))) {
3091 len32 += align_start;
3092 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
3097 if ((len32 > 4) || !align_start) {
3098 align_end = 4 - (len32 & 3);
3100 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
					  end, 4)))
			return rc;
3107 if (align_start || align_end) {
3108 		buf = kmalloc(len32, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
3112 memcpy(buf, start, 4);
3115 memcpy(buf + len32 - 4, end, 4);
3117 memcpy(buf + align_start, data_buf, buf_size);
3120 if (bp->flash_info->buffered == 0) {
3121 flash_buffer = kmalloc(264, GFP_KERNEL);
3122 		if (flash_buffer == NULL) {
			rc = -ENOMEM;
3124 			goto nvram_write_end;
		}
3129 while ((written < len32) && (rc == 0)) {
3130 u32 page_start, page_end, data_start, data_end;
3131 u32 addr, cmd_flags;
3134 /* Find the page_start addr */
3135 page_start = offset32 + written;
3136 page_start -= (page_start % bp->flash_info->page_size);
3137 /* Find the page_end addr */
3138 page_end = page_start + bp->flash_info->page_size;
3139 /* Find the data_start addr */
3140 data_start = (written == 0) ? offset32 : page_start;
3141 /* Find the data_end addr */
3142 data_end = (page_end > offset32 + len32) ?
3143 (offset32 + len32) : page_end;
3145 /* Request access to the flash interface. */
3146 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3147 goto nvram_write_end;
3149 /* Enable access to flash interface */
3150 bnx2_enable_nvram_access(bp);
3152 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3153 if (bp->flash_info->buffered == 0) {
3156 /* Read the whole page into the buffer
3157 * (non-buffer flash only) */
3158 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3159 if (j == (bp->flash_info->page_size - 4)) {
3160 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3162 				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
3168 					goto nvram_write_end;
3174 /* Enable writes to flash interface (unlock write-protect) */
3175 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3176 goto nvram_write_end;
3178 /* Erase the page */
3179 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3180 goto nvram_write_end;
3182 		/* Re-enable write access for the actual write. */
3183 bnx2_enable_nvram_write(bp);
3185 		/* Loop to write back the buffer data from page_start to
		 * data_start. */
3188 if (bp->flash_info->buffered == 0) {
3189 for (addr = page_start; addr < data_start;
3190 addr += 4, i += 4) {
3192 rc = bnx2_nvram_write_dword(bp, addr,
3193 &flash_buffer[i], cmd_flags);
				if (rc != 0)
3196 					goto nvram_write_end;
3202 /* Loop to write the new data from data_start to data_end */
3203 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3204 if ((addr == page_end - 4) ||
3205 ((bp->flash_info->buffered) &&
3206 (addr == data_end - 4))) {
3208 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3210 			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
3214 				goto nvram_write_end;
3220 		/* Loop to write back the buffer data from data_end
		 * to page_end. */
3222 if (bp->flash_info->buffered == 0) {
3223 for (addr = data_end; addr < page_end;
3224 addr += 4, i += 4) {
3226 if (addr == page_end-4) {
3227 cmd_flags = BNX2_NVM_COMMAND_LAST;
3229 rc = bnx2_nvram_write_dword(bp, addr,
3230 &flash_buffer[i], cmd_flags);
				if (rc != 0)
3233 					goto nvram_write_end;
3239 /* Disable writes to flash interface (lock write-protect) */
3240 bnx2_disable_nvram_write(bp);
3242 /* Disable access to flash interface */
3243 bnx2_disable_nvram_access(bp);
3244 bnx2_release_nvram_lock(bp);
3246 /* Increment written */
3247 written += data_end - data_start;
3251 if (bp->flash_info->buffered == 0)
3252 kfree(flash_buffer);
3254 	if (align_start || align_end)
		kfree(buf);
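/* Summary of the loop above for non-buffered parts: each pass is a
 * page-granular read-modify-write -- save the whole page into
 * flash_buffer, erase it, then rewrite [page_start, data_start) from
 * the saved copy, [data_start, data_end) from the caller's data, and
 * [data_end, page_end) from the saved copy again.  Buffered parts skip
 * the save, erase and restore steps entirely.
 */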
3260 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3265 /* Wait for the current PCI transaction to complete before
3266 * issuing a reset. */
3267 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3268 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3269 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3270 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3271 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3272 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3275 /* Wait for the firmware to tell us it is ok to issue a reset. */
3276 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3278 /* Deposit a driver reset signature so the firmware knows that
3279 * this is a soft reset. */
3280 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3281 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3283 	/* Do a dummy read to force the chip to complete all current transactions
3284 * before we issue a reset. */
3285 val = REG_RD(bp, BNX2_MISC_ID);
3287 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3288 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3289 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3292 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3294 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3295 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3298 	/* Reset takes approximately 30 usec */
3299 for (i = 0; i < 10; i++) {
3300 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3301 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3302 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3308 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3309 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3310 printk(KERN_ERR PFX "Chip reset did not complete\n");
3314 /* Make sure byte swapping is properly configured. */
3315 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3316 if (val != 0x01020304) {
3317 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3321 /* Wait for the firmware to finish its initialization. */
3322 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3326 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3327 		/* Adjust the voltage regulator two steps lower. The default
3328 * of this register is 0x0000000e. */
3329 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3331 /* Remove bad rbuf memory from the free pool. */
3332 rc = bnx2_alloc_bad_rbuf(bp);
3339 bnx2_init_chip(struct bnx2 *bp)
3344 /* Make sure the interrupt is not active. */
3345 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3347 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3348 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3350 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3352 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3353 DMA_READ_CHANS << 12 |
3354 DMA_WRITE_CHANS << 16;
3356 val |= (0x2 << 20) | (1 << 11);
3358 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3361 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3362 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3363 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3365 REG_WR(bp, BNX2_DMA_CONFIG, val);
3367 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3368 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3369 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3370 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3373 if (bp->flags & PCIX_FLAG) {
3376 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3378 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3379 val16 & ~PCI_X_CMD_ERO);
3382 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3383 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3384 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3385 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3387 /* Initialize context mapping and zero out the quick contexts. The
3388 * context block must have already been enabled. */
3389 bnx2_init_context(bp);
3391 	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;
3394 bnx2_init_nvram(bp);
3396 bnx2_set_mac_addr(bp);
3398 val = REG_RD(bp, BNX2_MQ_CONFIG);
3399 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3400 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3401 REG_WR(bp, BNX2_MQ_CONFIG, val);
3403 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3404 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3405 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3407 val = (BCM_PAGE_BITS - 8) << 24;
3408 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3410 /* Configure page size. */
3411 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3412 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3413 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3414 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3416 val = bp->mac_addr[0] +
3417 (bp->mac_addr[1] << 8) +
3418 (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
3420 	      (bp->mac_addr[4] << 8) +
3421 (bp->mac_addr[5] << 16);
3422 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3424 /* Program the MTU. Also include 4 bytes for CRC32. */
3425 val = bp->dev->mtu + ETH_HLEN + 4;
3426 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3427 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3428 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
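	/* Example of the MTU math above, assuming the conventional
	 * MAX_ETHERNET_PACKET_SIZE of 1514: a 1500-byte MTU programs
	 * 1500 + 14 + 4 = 1518, which is not above 1514 + 4, so jumbo
	 * receive stays disabled; a 9000-byte MTU would program 9018
	 * with BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA set on top.
	 */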
3430 bp->last_status_idx = 0;
3431 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3433 /* Set up how to generate a link change interrupt. */
3434 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3436 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3437 (u64) bp->status_blk_mapping & 0xffffffff);
3438 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3440 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3441 (u64) bp->stats_blk_mapping & 0xffffffff);
3442 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3443 (u64) bp->stats_blk_mapping >> 32);
3445 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3446 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3448 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3449 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3451 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3452 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3454 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3456 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3458 REG_WR(bp, BNX2_HC_COM_TICKS,
3459 (bp->com_ticks_int << 16) | bp->com_ticks);
3461 REG_WR(bp, BNX2_HC_CMD_TICKS,
3462 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3464 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3465 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3467 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3468 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else
3470 		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3471 BNX2_HC_CONFIG_TX_TMR_MODE |
3472 BNX2_HC_CONFIG_COLLECT_STATS);
3475 /* Clear internal stats counters. */
3476 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3478 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3480 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3481 BNX2_PORT_FEATURE_ASF_ENABLED)
3482 bp->flags |= ASF_ENABLE_FLAG;
3484 /* Initialize the receive filter. */
3485 bnx2_set_rx_mode(bp->dev);
3487 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);
3490 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3491 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3495 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3502 bnx2_init_tx_ring(struct bnx2 *bp)
3507 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3509 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3510 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3515 bp->tx_prod_bseq = 0;
3517 val = BNX2_L2CTX_TYPE_TYPE_L2;
3518 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3519 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3521 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3523 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3525 val = (u64) bp->tx_desc_mapping >> 32;
3526 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3528 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3529 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
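/* The descriptor set up first, above, at index MAX_TX_DESC_CNT is not a
 * packet slot: it carries the ring's own base address, chaining the end
 * of the page back to its start so the hardware walks a circular list.
 * The CTX_WR calls then publish the same chain address into the TX
 * channel's context so the chip knows where the ring lives.
 */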
3533 bnx2_init_rx_ring(struct bnx2 *bp)
3537 u16 prod, ring_prod;
3540 /* 8 for CRC and VLAN */
3541 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3542 /* 8 for alignment */
3543 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3545 ring_prod = prod = bp->rx_prod = 0;
3548 bp->rx_prod_bseq = 0;
3550 for (i = 0; i < bp->rx_max_ring; i++) {
3553 rxbd = &bp->rx_desc_ring[i][0];
3554 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3555 rxbd->rx_bd_len = bp->rx_buf_use_size;
3556 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
3558 		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
3562 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3563 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3567 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3568 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3570 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3572 val = (u64) bp->rx_desc_mapping[0] >> 32;
3573 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3575 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3576 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3578 for (i = 0; i < bp->rx_ring_size; i++) {
3579 		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
3582 prod = NEXT_RX_BD(prod);
3583 ring_prod = RX_RING_IDX(prod);
3587 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3589 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
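/* Buffer posting is a two-register mailbox, as above: HOST_BDIDX
 * publishes the new producer index and HOST_BSEQ the cumulative byte
 * count of posted buffers (rx_prod_bseq grows by rx_buf_use_size per
 * skb), which lets the chip meter exactly how much receive space the
 * host has made available.
 */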
3593 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3597 bp->rx_ring_size = size;
3599 while (size > MAX_RX_DESC_CNT) {
3600 		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
3603 /* round to next power of 2 */
	max = MAX_RX_RINGS;
3605 	while ((max & num_rings) == 0)
		max >>= 1;

3608 	if (num_rings != max)
		max <<= 1;
3611 bp->rx_max_ring = max;
3612 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
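/* Worked example of the sizing above, assuming the usual 4K ring page
 * (RX_DESC_CNT = 256 descriptors, MAX_RX_DESC_CNT = 255 usable): a
 * request for 1500 buffers spans 6 pages, which rounds up to
 * rx_max_ring = 8 so ring arithmetic can mask rather than range-check,
 * giving rx_max_ring_idx = 8 * 256 - 1 = 2047.
 */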
3616 bnx2_free_tx_skbs(struct bnx2 *bp)
3620 	if (bp->tx_buf_ring == NULL)
		return;
3623 for (i = 0; i < TX_DESC_CNT; ) {
3624 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3625 struct sk_buff *skb = tx_buf->skb;
3633 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3634 skb_headlen(skb), PCI_DMA_TODEVICE);
3638 last = skb_shinfo(skb)->nr_frags;
3639 for (j = 0; j < last; j++) {
3640 tx_buf = &bp->tx_buf_ring[i + j + 1];
3641 pci_unmap_page(bp->pdev,
3642 pci_unmap_addr(tx_buf, mapping),
3643 				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
3646 dev_kfree_skb_any(skb);
3653 bnx2_free_rx_skbs(struct bnx2 *bp)
3657 	if (bp->rx_buf_ring == NULL)
		return;
3660 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3661 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3662 struct sk_buff *skb = rx_buf->skb;
3667 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3668 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3672 dev_kfree_skb_any(skb);
3677 bnx2_free_skbs(struct bnx2 *bp)
3679 bnx2_free_tx_skbs(bp);
3680 bnx2_free_rx_skbs(bp);
3684 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3688 rc = bnx2_reset_chip(bp, reset_code);
3693 	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;
3696 bnx2_init_tx_ring(bp);
3697 bnx2_init_rx_ring(bp);
3702 bnx2_init_nic(struct bnx2 *bp)
3706 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;
3715 bnx2_test_registers(struct bnx2 *bp)
3719 	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
3725 { 0x006c, 0, 0x00000000, 0x0000003f },
3726 { 0x0090, 0, 0xffffffff, 0x00000000 },
3727 { 0x0094, 0, 0x00000000, 0x00000000 },
3729 { 0x0404, 0, 0x00003f00, 0x00000000 },
3730 { 0x0418, 0, 0x00000000, 0xffffffff },
3731 { 0x041c, 0, 0x00000000, 0xffffffff },
3732 { 0x0420, 0, 0x00000000, 0x80ffffff },
3733 { 0x0424, 0, 0x00000000, 0x00000000 },
3734 { 0x0428, 0, 0x00000000, 0x00000001 },
3735 { 0x0450, 0, 0x00000000, 0x0000ffff },
3736 { 0x0454, 0, 0x00000000, 0xffffffff },
3737 { 0x0458, 0, 0x00000000, 0xffffffff },
3739 { 0x0808, 0, 0x00000000, 0xffffffff },
3740 { 0x0854, 0, 0x00000000, 0xffffffff },
3741 { 0x0868, 0, 0x00000000, 0x77777777 },
3742 { 0x086c, 0, 0x00000000, 0x77777777 },
3743 { 0x0870, 0, 0x00000000, 0x77777777 },
3744 { 0x0874, 0, 0x00000000, 0x77777777 },
3746 { 0x0c00, 0, 0x00000000, 0x00000001 },
3747 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3748 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3750 { 0x1000, 0, 0x00000000, 0x00000001 },
3751 { 0x1004, 0, 0x00000000, 0x000f0001 },
3753 { 0x1408, 0, 0x01c00800, 0x00000000 },
3754 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3755 { 0x14a8, 0, 0x00000000, 0x000001ff },
3756 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3757 { 0x14b0, 0, 0x00000002, 0x00000001 },
3758 { 0x14b8, 0, 0x00000000, 0x00000000 },
3759 { 0x14c0, 0, 0x00000000, 0x00000009 },
3760 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3761 { 0x14cc, 0, 0x00000000, 0x00000001 },
3762 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3764 { 0x1800, 0, 0x00000000, 0x00000001 },
3765 { 0x1804, 0, 0x00000000, 0x00000003 },
3767 { 0x2800, 0, 0x00000000, 0x00000001 },
3768 { 0x2804, 0, 0x00000000, 0x00003f01 },
3769 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3770 { 0x2810, 0, 0xffff0000, 0x00000000 },
3771 { 0x2814, 0, 0xffff0000, 0x00000000 },
3772 { 0x2818, 0, 0xffff0000, 0x00000000 },
3773 { 0x281c, 0, 0xffff0000, 0x00000000 },
3774 { 0x2834, 0, 0xffffffff, 0x00000000 },
3775 { 0x2840, 0, 0x00000000, 0xffffffff },
3776 { 0x2844, 0, 0x00000000, 0xffffffff },
3777 { 0x2848, 0, 0xffffffff, 0x00000000 },
3778 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3780 { 0x2c00, 0, 0x00000000, 0x00000011 },
3781 { 0x2c04, 0, 0x00000000, 0x00030007 },
3783 { 0x3c00, 0, 0x00000000, 0x00000001 },
3784 { 0x3c04, 0, 0x00000000, 0x00070000 },
3785 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3786 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3787 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3788 { 0x3c14, 0, 0x00000000, 0xffffffff },
3789 { 0x3c18, 0, 0x00000000, 0xffffffff },
3790 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3791 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3793 { 0x5004, 0, 0x00000000, 0x0000007f },
3794 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3795 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3797 { 0x5c00, 0, 0x00000000, 0x00000001 },
3798 { 0x5c04, 0, 0x00000000, 0x0003000f },
3799 { 0x5c08, 0, 0x00000003, 0x00000000 },
3800 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3801 { 0x5c10, 0, 0x00000000, 0xffffffff },
3802 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3803 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3804 { 0x5c88, 0, 0x00000000, 0x00077373 },
3805 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3807 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3808 { 0x680c, 0, 0xffffffff, 0x00000000 },
3809 { 0x6810, 0, 0xffffffff, 0x00000000 },
3810 { 0x6814, 0, 0xffffffff, 0x00000000 },
3811 { 0x6818, 0, 0xffffffff, 0x00000000 },
3812 { 0x681c, 0, 0xffffffff, 0x00000000 },
3813 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3814 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3815 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3816 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3817 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3818 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3819 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3820 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3821 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3822 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3823 { 0x684c, 0, 0xffffffff, 0x00000000 },
3824 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3825 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3826 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3827 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3828 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3829 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3831 { 0xffff, 0, 0x00000000, 0x00000000 },
3835 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3836 u32 offset, rw_mask, ro_mask, save_val, val;
3838 offset = (u32) reg_tbl[i].offset;
3839 rw_mask = reg_tbl[i].rw_mask;
3840 ro_mask = reg_tbl[i].ro_mask;
3842 save_val = readl(bp->regview + offset);
3844 writel(0, bp->regview + offset);
3846 val = readl(bp->regview + offset);
3847 if ((val & rw_mask) != 0) {
3851 if ((val & ro_mask) != (save_val & ro_mask)) {
3855 writel(0xffffffff, bp->regview + offset);
3857 val = readl(bp->regview + offset);
3858 if ((val & rw_mask) != rw_mask) {
3862 if ((val & ro_mask) != (save_val & ro_mask)) {
3866 writel(save_val, bp->regview + offset);
3870 writel(save_val, bp->regview + offset);
3878 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3880 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3881 		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
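	/* The patterns walk all-zeros and all-ones plus the alternating
	 * and byte-mixed 0x55/0xaa forms through every word, so stuck
	 * bits, coupled neighboring bits and swapped byte lanes all
	 * surface as a read-back mismatch.
	 */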
3884 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3887 for (offset = 0; offset < size; offset += 4) {
3889 REG_WR_IND(bp, start + offset, test_pattern[i]);
3891 			if (REG_RD_IND(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
3901 bnx2_test_memory(struct bnx2 *bp)
3905 	static const struct {
		u32   offset;
		u32   len;
	} mem_tbl[] = {
3909 { 0x60000, 0x4000 },
3910 { 0xa0000, 0x3000 },
3911 { 0xe0000, 0x4000 },
3912 { 0x120000, 0x4000 },
3913 { 0x1a0000, 0x4000 },
3914 		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	};
3918 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3919 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3920 			mem_tbl[i].len)) != 0) {
			return ret;
		}
3928 #define BNX2_MAC_LOOPBACK 0
3929 #define BNX2_PHY_LOOPBACK 1
3932 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3934 unsigned int pkt_size, num_pkts, i;
3935 struct sk_buff *skb, *rx_skb;
3936 unsigned char *packet;
3937 u16 rx_start_idx, rx_idx;
3940 struct sw_bd *rx_buf;
3941 struct l2_fhdr *rx_hdr;
3944 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3945 bp->loopback = MAC_LOOPBACK;
3946 bnx2_set_mac_loopback(bp);
3948 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3950 bnx2_set_phy_loopback(bp);
	pkt_size = 1514;
3956 	skb = dev_alloc_skb(pkt_size);
	if (!skb)
		return -ENOMEM;
3959 packet = skb_put(skb, pkt_size);
3960 memcpy(packet, bp->mac_addr, 6);
3961 memset(packet + 6, 0x0, 8);
3962 for (i = 14; i < pkt_size; i++)
3963 packet[i] = (unsigned char) (i & 0xff);
3965 	map = pci_map_single(bp->pdev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
3968 REG_WR(bp, BNX2_HC_COMMAND,
3969 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3971 REG_RD(bp, BNX2_HC_COMMAND);
3974 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3978 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3980 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3981 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3982 txbd->tx_bd_mss_nbytes = pkt_size;
3983 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3986 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3987 bp->tx_prod_bseq += pkt_size;
3989 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3990 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
3994 REG_WR(bp, BNX2_HC_COMMAND,
3995 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3997 REG_RD(bp, BNX2_HC_COMMAND);
4001 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4002 dev_kfree_skb_irq(skb);
4004 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4005 goto loopback_test_done;
4008 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4009 if (rx_idx != rx_start_idx + num_pkts) {
4010 goto loopback_test_done;
4013 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4014 rx_skb = rx_buf->skb;
4016 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4017 skb_reserve(rx_skb, bp->rx_offset);
4019 pci_dma_sync_single_for_cpu(bp->pdev,
4020 pci_unmap_addr(rx_buf, mapping),
4021 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4023 if (rx_hdr->l2_fhdr_status &
4024 (L2_FHDR_ERRORS_BAD_CRC |
4025 L2_FHDR_ERRORS_PHY_DECODE |
4026 L2_FHDR_ERRORS_ALIGNMENT |
4027 L2_FHDR_ERRORS_TOO_SHORT |
4028 L2_FHDR_ERRORS_GIANT_FRAME)) {
4030 goto loopback_test_done;
4033 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4034 goto loopback_test_done;
4037 for (i = 14; i < pkt_size; i++) {
4038 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4039 goto loopback_test_done;
4050 #define BNX2_MAC_LOOPBACK_FAILED 1
4051 #define BNX2_PHY_LOOPBACK_FAILED 2
4052 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4053 BNX2_PHY_LOOPBACK_FAILED)
4056 bnx2_test_loopback(struct bnx2 *bp)
4060 if (!netif_running(bp->dev))
4061 return BNX2_LOOPBACK_FAILED;
4063 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4064 spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
4066 	spin_unlock_bh(&bp->phy_lock);
4067 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4068 rc |= BNX2_MAC_LOOPBACK_FAILED;
4069 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4070 rc |= BNX2_PHY_LOOPBACK_FAILED;
4074 #define NVRAM_SIZE 0x200
4075 #define CRC32_RESIDUAL 0xdebb20e3
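/* Why a fixed residual works below: the bootcode stores each 0x100-byte
 * region with the (inverted, little-endian) CRC32 of the preceding
 * bytes in its final dword, and CRC32 run across data-plus-CRC always
 * leaves the constant residue 0xdebb20e3.  A sketch of the per-block
 * check, assuming that layout:
 *
 *	static inline int block_crc_ok(u8 *block)
 *	{
 *		return ether_crc_le(0x100, block) == CRC32_RESIDUAL;
 *	}
 */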
4078 bnx2_test_nvram(struct bnx2 *bp)
4080 u32 buf[NVRAM_SIZE / 4];
4081 u8 *data = (u8 *) buf;
4085 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4086 goto test_nvram_done;
4088 magic = be32_to_cpu(buf[0]);
4089 if (magic != 0x669955aa) {
		rc = -ENODEV;
4091 		goto test_nvram_done;
4094 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4095 goto test_nvram_done;
4097 csum = ether_crc_le(0x100, data);
4098 if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
4100 		goto test_nvram_done;
4103 csum = ether_crc_le(0x100, data + 0x100);
4104 if (csum != CRC32_RESIDUAL) {
4113 bnx2_test_link(struct bnx2 *bp)
4117 spin_lock_bh(&bp->phy_lock);
4118 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4119 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4120 spin_unlock_bh(&bp->phy_lock);
4122 if (bmsr & BMSR_LSTATUS) {
4129 bnx2_test_intr(struct bnx2 *bp)
4134 	if (!netif_running(bp->dev))
		return -ENODEV;
4137 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4139 /* This register is not touched during run-time. */
4140 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4141 REG_RD(bp, BNX2_HC_COMMAND);
4143 for (i = 0; i < 10; i++) {
4144 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4150 msleep_interruptible(10);
4159 bnx2_timer(unsigned long data)
4161 struct bnx2 *bp = (struct bnx2 *) data;
4164 	if (!netif_running(bp->dev))
		return;
4167 if (atomic_read(&bp->intr_sem) != 0)
4168 goto bnx2_restart_timer;
4170 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4171 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4173 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4175 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
4176 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
4178 spin_lock(&bp->phy_lock);
4179 if (bp->serdes_an_pending) {
4180 bp->serdes_an_pending--;
4182 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4185 bp->current_interval = bp->timer_interval;
4187 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4189 if (bmcr & BMCR_ANENABLE) {
4192 bnx2_write_phy(bp, 0x1c, 0x7c00);
4193 bnx2_read_phy(bp, 0x1c, &phy1);
4195 bnx2_write_phy(bp, 0x17, 0x0f01);
4196 bnx2_read_phy(bp, 0x15, &phy2);
4197 bnx2_write_phy(bp, 0x17, 0x0f01);
4198 bnx2_read_phy(bp, 0x15, &phy2);
4200 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4201 !(phy2 & 0x20)) { /* no CONFIG */
4203 bmcr &= ~BMCR_ANENABLE;
4204 				bmcr |= BMCR_SPEED1000 |
					BMCR_FULLDPLX;
4206 bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |=
4208 					PHY_PARALLEL_DETECT_FLAG;
4212 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4213 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4216 bnx2_write_phy(bp, 0x17, 0x0f01);
4217 bnx2_read_phy(bp, 0x15, &phy2);
4221 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4222 bmcr |= BMCR_ANENABLE;
4223 bnx2_write_phy(bp, MII_BMCR, bmcr);
4225 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4230 bp->current_interval = bp->timer_interval;
4232 spin_unlock(&bp->phy_lock);
4236 mod_timer(&bp->timer, jiffies + bp->current_interval);
4239 /* Called with rtnl_lock */
4241 bnx2_open(struct net_device *dev)
4243 struct bnx2 *bp = netdev_priv(dev);
4246 bnx2_set_power_state(bp, PCI_D0);
4247 bnx2_disable_int(bp);
4249 rc = bnx2_alloc_mem(bp);
4253 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4254 		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {
4257 if (pci_enable_msi(bp->pdev) == 0) {
4258 bp->flags |= USING_MSI_FLAG;
4259 			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					 dev);
4263 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4264 SA_SHIRQ, dev->name, dev);
4268 		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				 dev->name, dev);
4276 rc = bnx2_init_nic(bp);
	if (rc) {
4279 		free_irq(bp->pdev->irq, dev);
4280 if (bp->flags & USING_MSI_FLAG) {
4281 pci_disable_msi(bp->pdev);
4282 bp->flags &= ~USING_MSI_FLAG;
4289 mod_timer(&bp->timer, jiffies + bp->current_interval);
4291 atomic_set(&bp->intr_sem, 0);
4293 bnx2_enable_int(bp);
4295 if (bp->flags & USING_MSI_FLAG) {
4296 /* Test MSI to make sure it is working
4297 		 * If MSI test fails, go back to INTx mode.
		 */
4299 if (bnx2_test_intr(bp) != 0) {
4300 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4301 " using MSI, switching to INTx mode. Please"
4302 " report this failure to the PCI maintainer"
4303 " and include system chipset information.\n",
4306 bnx2_disable_int(bp);
4307 free_irq(bp->pdev->irq, dev);
4308 pci_disable_msi(bp->pdev);
4309 bp->flags &= ~USING_MSI_FLAG;
4311 rc = bnx2_init_nic(bp);
4314 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4315 SA_SHIRQ, dev->name, dev);
4320 del_timer_sync(&bp->timer);
4323 bnx2_enable_int(bp);
4326 if (bp->flags & USING_MSI_FLAG) {
4327 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4330 netif_start_queue(dev);
4336 bnx2_reset_task(void *data)
4338 struct bnx2 *bp = data;
4340 	if (!netif_running(bp->dev))
		return;
4343 bp->in_reset_task = 1;
4344 bnx2_netif_stop(bp);
4348 atomic_set(&bp->intr_sem, 1);
4349 bnx2_netif_start(bp);
4350 bp->in_reset_task = 0;
4354 bnx2_tx_timeout(struct net_device *dev)
4356 struct bnx2 *bp = netdev_priv(dev);
4358 /* This allows the netif to be shutdown gracefully before resetting */
4359 schedule_work(&bp->reset_task);
4363 /* Called with rtnl_lock */
4365 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4367 struct bnx2 *bp = netdev_priv(dev);
4369 bnx2_netif_stop(bp);
	bp->vlgrp = vlgrp;
4372 	bnx2_set_rx_mode(dev);
4374 bnx2_netif_start(bp);
4377 /* Called with rtnl_lock */
4379 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4381 struct bnx2 *bp = netdev_priv(dev);
4383 bnx2_netif_stop(bp);
	if (bp->vlgrp)
4386 		bp->vlgrp->vlan_devices[vid] = NULL;
4387 bnx2_set_rx_mode(dev);
4389 bnx2_netif_start(bp);
4393 /* Called with netif_tx_lock.
4394 * hard_start_xmit is pseudo-lockless - a lock is only required when
4395 * the tx queue is full. This way, we get the benefit of lockless
4396 * operations most of the time without the complexities to handle
4397  * netif_stop_queue/wake_queue race conditions.
 */
4400 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4402 struct bnx2 *bp = netdev_priv(dev);
4405 struct sw_bd *tx_buf;
4406 u32 len, vlan_tag_flags, last_frag, mss;
4407 u16 prod, ring_prod;
4410 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4411 netif_stop_queue(dev);
4412 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4415 return NETDEV_TX_BUSY;
4417 len = skb_headlen(skb);
	prod = bp->tx_prod;
4419 	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
4422 if (skb->ip_summed == CHECKSUM_HW) {
4423 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4426 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
4428 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4431 if ((mss = skb_shinfo(skb)->tso_size) &&
4432 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4433 u32 tcp_opt_len, ip_tcp_len;
4435 if (skb_header_cloned(skb) &&
4436 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4438 return NETDEV_TX_OK;
4441 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4442 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4445 if (skb->h.th->doff > 5) {
4446 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4448 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4450 skb->nh.iph->check = 0;
4451 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		skb->h.th->check =
4453 			~csum_tcpudp_magic(skb->nh.iph->saddr,
					   skb->nh.iph->daddr,
					   0, IPPROTO_TCP, 0);
4457 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4458 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4459 (tcp_opt_len >> 2)) << 8;
4468 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4470 tx_buf = &bp->tx_buf_ring[ring_prod];
4472 pci_unmap_addr_set(tx_buf, mapping, mapping);
4474 txbd = &bp->tx_desc_ring[ring_prod];
4476 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4477 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4478 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4479 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4481 last_frag = skb_shinfo(skb)->nr_frags;
4483 for (i = 0; i < last_frag; i++) {
4484 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4486 prod = NEXT_TX_BD(prod);
4487 ring_prod = TX_RING_IDX(prod);
4488 txbd = &bp->tx_desc_ring[ring_prod];
		len = frag->size;
4491 		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4492 len, PCI_DMA_TODEVICE);
4493 		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				   mapping, mapping);
4496 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4497 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4498 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4499 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4502 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4504 prod = NEXT_TX_BD(prod);
4505 bp->tx_prod_bseq += skb->len;
4507 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4508 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4513 dev->trans_start = jiffies;
4515 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4516 spin_lock(&bp->tx_lock);
4517 netif_stop_queue(dev);
4519 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4520 netif_wake_queue(dev);
4521 spin_unlock(&bp->tx_lock);
4524 return NETDEV_TX_OK;
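/* The tail of bnx2_start_xmit() above is the standard lockless-TX
 * stop/wake pattern: stop the queue first, then re-check availability
 * under tx_lock, so a TX completion that freed descriptors between the
 * availability check and netif_stop_queue() cannot leave the queue
 * stopped forever.
 */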
4527 /* Called with rtnl_lock */
4529 bnx2_close(struct net_device *dev)
4531 struct bnx2 *bp = netdev_priv(dev);
4534 /* Calling flush_scheduled_work() may deadlock because
4535 * linkwatch_event() may be on the workqueue and it will try to get
4536 	 * the rtnl_lock which we are holding.
	 */
4538 while (bp->in_reset_task)
4541 bnx2_netif_stop(bp);
4542 del_timer_sync(&bp->timer);
4543 if (bp->flags & NO_WOL_FLAG)
4544 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
4546 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
4548 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4549 bnx2_reset_chip(bp, reset_code);
4550 free_irq(bp->pdev->irq, dev);
4551 if (bp->flags & USING_MSI_FLAG) {
4552 pci_disable_msi(bp->pdev);
4553 bp->flags &= ~USING_MSI_FLAG;
4558 netif_carrier_off(bp->dev);
4559 bnx2_set_power_state(bp, PCI_D3hot);
4563 #define GET_NET_STATS64(ctr) \
4564 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4565 (unsigned long) (ctr##_lo)
4567 #define GET_NET_STATS32(ctr) \
	(ctr##_lo)
4570 #if (BITS_PER_LONG == 64)
4571 #define GET_NET_STATS GET_NET_STATS64
#else
4573 #define GET_NET_STATS GET_NET_STATS32
#endif
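/* Example: a counter split as ctr_hi:ctr_lo = 0x2:0x80000000 reads as
 * 0x280000000 through GET_NET_STATS64 on 64-bit kernels; 32-bit kernels
 * keep only the low dword, so these unsigned long fields wrap at 4 GB
 * -- the usual net_device_stats limitation.
 */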
4576 static struct net_device_stats *
4577 bnx2_get_stats(struct net_device *dev)
4579 struct bnx2 *bp = netdev_priv(dev);
4580 struct statistics_block *stats_blk = bp->stats_blk;
4581 struct net_device_stats *net_stats = &bp->net_stats;
4583 	if (bp->stats_blk == NULL) {
		return net_stats;
	}
4586 net_stats->rx_packets =
4587 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4588 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4589 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4591 net_stats->tx_packets =
4592 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4593 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4594 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4596 net_stats->rx_bytes =
4597 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4599 net_stats->tx_bytes =
4600 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4602 net_stats->multicast =
4603 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4605 net_stats->collisions =
4606 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4608 net_stats->rx_length_errors =
4609 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4610 stats_blk->stat_EtherStatsOverrsizePkts);
4612 net_stats->rx_over_errors =
4613 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4615 net_stats->rx_frame_errors =
4616 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4618 net_stats->rx_crc_errors =
4619 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4621 net_stats->rx_errors = net_stats->rx_length_errors +
4622 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4623 net_stats->rx_crc_errors;
4625 net_stats->tx_aborted_errors =
4626 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4627 stats_blk->stat_Dot3StatsLateCollisions);
4629 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4630 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4631 net_stats->tx_carrier_errors = 0;
	else
4633 		net_stats->tx_carrier_errors = (unsigned long)
4635 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4638 net_stats->tx_errors =
4640 		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
4642 net_stats->tx_aborted_errors +
4643 net_stats->tx_carrier_errors;
4645 net_stats->rx_missed_errors =
4646 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4647 stats_blk->stat_FwRxDrop);
4652 /* All ethtool functions called with rtnl_lock */
4655 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4657 struct bnx2 *bp = netdev_priv(dev);
4659 cmd->supported = SUPPORTED_Autoneg;
4660 if (bp->phy_flags & PHY_SERDES_FLAG) {
4661 		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
4664 cmd->port = PORT_FIBRE;
4667 cmd->supported |= SUPPORTED_10baseT_Half |
4668 SUPPORTED_10baseT_Full |
4669 SUPPORTED_100baseT_Half |
4670 SUPPORTED_100baseT_Full |
4671 			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
4674 cmd->port = PORT_TP;
4677 cmd->advertising = bp->advertising;
4679 if (bp->autoneg & AUTONEG_SPEED) {
4680 cmd->autoneg = AUTONEG_ENABLE;
4683 cmd->autoneg = AUTONEG_DISABLE;
4686 if (netif_carrier_ok(dev)) {
4687 cmd->speed = bp->line_speed;
4688 cmd->duplex = bp->duplex;
4695 cmd->transceiver = XCVR_INTERNAL;
4696 cmd->phy_address = bp->phy_addr;
4702 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4704 struct bnx2 *bp = netdev_priv(dev);
4705 u8 autoneg = bp->autoneg;
4706 u8 req_duplex = bp->req_duplex;
4707 u16 req_line_speed = bp->req_line_speed;
4708 u32 advertising = bp->advertising;
4710 if (cmd->autoneg == AUTONEG_ENABLE) {
4711 autoneg |= AUTONEG_SPEED;
4713 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4715 /* allow advertising 1 speed */
4716 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4717 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4718 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4719 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4721 if (bp->phy_flags & PHY_SERDES_FLAG)
4724 advertising = cmd->advertising;
4727 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4728 advertising = cmd->advertising;
4730 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4734 if (bp->phy_flags & PHY_SERDES_FLAG) {
4735 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4738 advertising = ETHTOOL_ALL_COPPER_SPEED;
4741 advertising |= ADVERTISED_Autoneg;
4744 if (bp->phy_flags & PHY_SERDES_FLAG) {
4745 if ((cmd->speed != SPEED_1000) ||
4746 			(cmd->duplex != DUPLEX_FULL)) {
				return -EINVAL;
			}
		}
4750 		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
4753 autoneg &= ~AUTONEG_SPEED;
4754 req_line_speed = cmd->speed;
4755 req_duplex = cmd->duplex;
4759 bp->autoneg = autoneg;
4760 bp->advertising = advertising;
4761 bp->req_line_speed = req_line_speed;
4762 bp->req_duplex = req_duplex;
4764 spin_lock_bh(&bp->phy_lock);
4768 spin_unlock_bh(&bp->phy_lock);
4774 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4776 struct bnx2 *bp = netdev_priv(dev);
4778 strcpy(info->driver, DRV_MODULE_NAME);
4779 strcpy(info->version, DRV_MODULE_VERSION);
4780 strcpy(info->bus_info, pci_name(bp->pdev));
4781 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4782 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4783 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4784 info->fw_version[1] = info->fw_version[3] = '.';
4785 info->fw_version[5] = 0;
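	/* Each byte of bp->fw_ver holds one decimal digit, which is why
	 * adding '0' suffices: e.g. fw_ver = 0x01040200 renders "1.4.2".
	 */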
4788 #define BNX2_REGDUMP_LEN (32 * 1024)
4791 bnx2_get_regs_len(struct net_device *dev)
4793 return BNX2_REGDUMP_LEN;
4797 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4799 u32 *p = _p, i, offset;
4801 struct bnx2 *bp = netdev_priv(dev);
4802 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4803 0x0800, 0x0880, 0x0c00, 0x0c10,
4804 0x0c30, 0x0d08, 0x1000, 0x101c,
4805 0x1040, 0x1048, 0x1080, 0x10a4,
4806 0x1400, 0x1490, 0x1498, 0x14f0,
4807 0x1500, 0x155c, 0x1580, 0x15dc,
4808 0x1600, 0x1658, 0x1680, 0x16d8,
4809 0x1800, 0x1820, 0x1840, 0x1854,
4810 0x1880, 0x1894, 0x1900, 0x1984,
4811 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4812 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4813 0x2000, 0x2030, 0x23c0, 0x2400,
4814 0x2800, 0x2820, 0x2830, 0x2850,
4815 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4816 0x3c00, 0x3c94, 0x4000, 0x4010,
4817 0x4080, 0x4090, 0x43c0, 0x4458,
4818 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4819 0x4fc0, 0x5010, 0x53c0, 0x5444,
4820 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4821 0x5fc0, 0x6000, 0x6400, 0x6428,
4822 0x6800, 0x6848, 0x684c, 0x6860,
4823 0x6888, 0x6910, 0x8000 };
4827 memset(p, 0, BNX2_REGDUMP_LEN);
4829 	if (!netif_running(bp->dev))
		return;
4833 offset = reg_boundaries[0];
4835 while (offset < BNX2_REGDUMP_LEN) {
4836 *p++ = REG_RD(bp, offset);
4838 if (offset == reg_boundaries[i + 1]) {
4839 offset = reg_boundaries[i + 2];
4840 p = (u32 *) (orig_p + offset);
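		/* reg_boundaries[] is consumed in pairs above: dump
		 * [start, end), then advance p and offset to the next
		 * readable window, leaving the memset()-zeroed holes in
		 * place so every register keeps its natural offset
		 * inside the BNX2_REGDUMP_LEN image.
		 */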
4847 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4849 struct bnx2 *bp = netdev_priv(dev);
4851 if (bp->flags & NO_WOL_FLAG) {
4856 wol->supported = WAKE_MAGIC;
4858 wol->wolopts = WAKE_MAGIC;
4862 memset(&wol->sopass, 0, sizeof(wol->sopass));
4866 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4868 struct bnx2 *bp = netdev_priv(dev);
4870 	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

4873 	if (wol->wolopts & WAKE_MAGIC) {
4874 		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;
4886 bnx2_nway_reset(struct net_device *dev)
4888 struct bnx2 *bp = netdev_priv(dev);
4891 	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}
4895 spin_lock_bh(&bp->phy_lock);
4897 /* Force a link down visible on the other side */
4898 if (bp->phy_flags & PHY_SERDES_FLAG) {
4899 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4900 spin_unlock_bh(&bp->phy_lock);
4904 spin_lock_bh(&bp->phy_lock);
4905 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4906 bp->current_interval = SERDES_AN_TIMEOUT;
4907 bp->serdes_an_pending = 1;
4908 mod_timer(&bp->timer, jiffies + bp->current_interval);
4912 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4913 bmcr &= ~BMCR_LOOPBACK;
4914 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4916 spin_unlock_bh(&bp->phy_lock);
4922 bnx2_get_eeprom_len(struct net_device *dev)
4924 struct bnx2 *bp = netdev_priv(dev);
4926 if (bp->flash_info == NULL)
4929 return (int) bp->flash_size;
4933 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4936 struct bnx2 *bp = netdev_priv(dev);
4939 /* parameters already validated in ethtool_get_eeprom */
4941 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4947 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4950 struct bnx2 *bp = netdev_priv(dev);
4953 /* parameters already validated in ethtool_set_eeprom */
4955 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4961 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4963 struct bnx2 *bp = netdev_priv(dev);
4965 memset(coal, 0, sizeof(struct ethtool_coalesce));
4967 coal->rx_coalesce_usecs = bp->rx_ticks;
4968 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4969 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4970 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4972 coal->tx_coalesce_usecs = bp->tx_ticks;
4973 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4974 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4975 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4977 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4983 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4985 struct bnx2 *bp = netdev_priv(dev);
4987 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4988 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4990 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4991 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4993 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4994 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4996 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4997 if (bp->rx_quick_cons_trip_int > 0xff)
4998 bp->rx_quick_cons_trip_int = 0xff;
5000 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5001 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5003 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5004 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5006 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5007 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5009 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5010 	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;
5013 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5014 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5015 bp->stats_ticks &= 0xffff00;
5017 if (netif_running(bp->dev)) {
5018 bnx2_netif_stop(bp);
5020 bnx2_netif_start(bp);
5027 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5029 struct bnx2 *bp = netdev_priv(dev);
5031 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5032 ering->rx_mini_max_pending = 0;
5033 ering->rx_jumbo_max_pending = 0;
5035 ering->rx_pending = bp->rx_ring_size;
5036 ering->rx_mini_pending = 0;
5037 ering->rx_jumbo_pending = 0;
5039 ering->tx_max_pending = MAX_TX_DESC_CNT;
5040 ering->tx_pending = bp->tx_ring_size;
5044 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5046 struct bnx2 *bp = netdev_priv(dev);
5048 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5049 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5050 		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
5054 if (netif_running(bp->dev)) {
5055 bnx2_netif_stop(bp);
5056 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5061 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5062 bp->tx_ring_size = ering->tx_pending;
5064 if (netif_running(bp->dev)) {
5067 rc = bnx2_alloc_mem(bp);
5071 bnx2_netif_start(bp);
5078 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5080 struct bnx2 *bp = netdev_priv(dev);
5082 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5083 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5084 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5088 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5090 struct bnx2 *bp = netdev_priv(dev);
5092 bp->req_flow_ctrl = 0;
5093 if (epause->rx_pause)
5094 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5095 if (epause->tx_pause)
5096 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5098 if (epause->autoneg) {
5099 bp->autoneg |= AUTONEG_FLOW_CTRL;
5102 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5105 spin_lock_bh(&bp->phy_lock);
5109 spin_unlock_bh(&bp->phy_lock);
5115 bnx2_get_rx_csum(struct net_device *dev)
5117 struct bnx2 *bp = netdev_priv(dev);
5123 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5125 struct bnx2 *bp = netdev_priv(dev);
5131 #define BNX2_NUM_STATS 46
static struct {
5134 	char string[ETH_GSTRING_LEN];
5135 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
5137 	{ "rx_error_bytes" },
	{ "tx_bytes" },
5139 	{ "tx_error_bytes" },
5140 { "rx_ucast_packets" },
5141 { "rx_mcast_packets" },
5142 { "rx_bcast_packets" },
5143 { "tx_ucast_packets" },
5144 { "tx_mcast_packets" },
5145 { "tx_bcast_packets" },
5146 { "tx_mac_errors" },
5147 { "tx_carrier_errors" },
5148 { "rx_crc_errors" },
5149 { "rx_align_errors" },
5150 { "tx_single_collisions" },
5151 { "tx_multi_collisions" },
	{ "tx_deferred" },
5153 	{ "tx_excess_collisions" },
5154 { "tx_late_collisions" },
5155 { "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
5158 	{ "rx_undersize_packets" },
5159 { "rx_oversize_packets" },
5160 { "rx_64_byte_packets" },
5161 { "rx_65_to_127_byte_packets" },
5162 { "rx_128_to_255_byte_packets" },
5163 { "rx_256_to_511_byte_packets" },
5164 { "rx_512_to_1023_byte_packets" },
5165 { "rx_1024_to_1522_byte_packets" },
5166 { "rx_1523_to_9022_byte_packets" },
5167 { "tx_64_byte_packets" },
5168 { "tx_65_to_127_byte_packets" },
5169 { "tx_128_to_255_byte_packets" },
5170 { "tx_256_to_511_byte_packets" },
5171 { "tx_512_to_1023_byte_packets" },
5172 { "tx_1024_to_1522_byte_packets" },
5173 { "tx_1523_to_9022_byte_packets" },
5174 { "rx_xon_frames" },
5175 { "rx_xoff_frames" },
5176 { "tx_xon_frames" },
5177 { "tx_xoff_frames" },
5178 { "rx_mac_ctrl_frames" },
5179 { "rx_filtered_packets" },
5181 { "rx_fw_discards" },
#define STATS_OFFSET32(offset_name) \
	(offsetof(struct statistics_block, offset_name) / 4)
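
/* STATS_OFFSET32() converts a byte offset within struct statistics_block
 * into an index into an array of u32 words; for example, a field at byte
 * offset 8 yields index 2 and is read below as *(hw_stats + 2).
 */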
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
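
/* The arrays above are parallel: entry i of bnx2_stats_str_arr names the
 * counter, bnx2_stats_offset_arr[i] locates it in the statistics block, and
 * the per-chip length array gives its width (4 or 8 bytes).  A length of 0
 * marks a counter that is unusable on that chip and is reported as zero in
 * bnx2_get_ethtool_stats() below.
 */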
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
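
/* Illustrative usage (not part of the driver): "ethtool -t eth0 offline"
 * requests the full suite above, while "ethtool -t eth0 online" runs only
 * the nvram/interrupt/link tests.  A nonzero buf[] entry flags the
 * corresponding test in bnx2_tests_str_arr as failed.
 */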
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
		       sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
		       sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				   bnx2_stats_offset_arr[i])) << 32) +
			 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
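
/* Worked example of the 8-byte fold above (illustrative values): a counter
 * whose high word is 0x1 and low word is 0x2 is reported as
 * ((u64) 0x1 << 32) + 0x2 = 0x100000002.  Userspace reads these values with
 * "ethtool -S eth0".
 */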
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		else
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
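
/* Illustrative usage (not part of the driver): "ethtool -p eth0 5" blinks
 * the port LED for roughly 5 seconds; each half-cycle above sleeps 500 ms,
 * so data seconds means data * 2 loop iterations.  The saved LED mode is
 * restored on exit.
 */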
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
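
/* Illustrative usage (not part of the driver): userspace MII utilities such
 * as mii-tool, or anything issuing the SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
 * ioctls, reach the PHY through this handler; register writes require
 * CAP_NET_ADMIN.
 */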
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif
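
/* poll_bnx2() above is used by netpoll clients such as netconsole: the
 * device is serviced by calling the interrupt handler directly with the
 * IRQ line disabled.
 */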
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address, "
		       "aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management capability, "
		       "aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	if (bp->pcix_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
			       "failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->tx_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(17);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
		       "aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		printk(KERN_ERR PFX "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
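
	/* Example of the unpacking above (illustrative values): with
	 * MAC_UPPER = 0x00000a0b and MAC_LOWER = 0x0c0d0e0f, the permanent
	 * address becomes 0a:0b:0c:0d:0e:0f.
	 */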
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 100);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;
	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	/* On the 5706 A0, use the same coalescing values inside and
	 * outside interrupt handling.
	 */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int = bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;
	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev is zeroed in alloc_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif
	if ((rc = register_netdev(dev))) {
		printk(KERN_ERR PFX "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
	       "IRQ %d, ",
	       dev->name,
	       bp->name,
	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
	       ((bp->flags & PCIX_FLAG) ? "-X" : ""),
	       ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
	       bp->bus_speed_mhz,
	       dev->base_addr,
	       bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);