/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4.45"
#define DRV_MODULE_RELDATE	"September 29, 2006"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();
	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
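/* Illustrative sketch, not part of the driver: the wraparound arithmetic
 * used by bnx2_tx_avail() above, assuming a 256-entry ring so the index
 * mask is 255.  The real TX_RING_IDX()/MAX_TX_DESC_CNT come from bnx2.h,
 * where the last descriptor of each ring page is a next-page pointer;
 * the "- 1" after a wrap appears to compensate for crossing it.
 */
static inline u32 bnx2_example_ring_used(u32 prod, u32 cons)
{
	const u32 mask = 255;	/* stand-in for MAX_TX_DESC_CNT */
	u32 diff = (prod & mask) - (cons & mask);

	/* e.g. prod = 3, cons = 250: the unsigned subtraction underflows,
	 * is folded back to 9, and the reserved entry is subtracted,
	 * giving 8 in-use descriptors.
	 */
	if (diff > mask)
		diff = (diff & mask) - 1;
	return diff;
}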
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY))
			break;
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	return ret;
}
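/* Illustrative sketch, not part of the driver: the clause-22 MDIO frame
 * that bnx2_read_phy()/bnx2_write_phy() assemble in BNX2_EMAC_MDIO_COMM.
 * The PHY address occupies bits 25:21, the register number bits 20:16,
 * and (for writes) the 16-bit payload the low half; the COMMAND, DISEXT
 * and START_BUSY control bits are OR'ed in above these fields.
 */
static inline u32 bnx2_example_mdio_frame(u32 phy_addr, u32 reg, u32 data)
{
	/* e.g. phy_addr = 1, reg = 1 (MII_BMSR), data = 0 -> 0x00210000 */
	return (phy_addr << 21) | (reg << 16) | (data & 0xffff);
}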
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
		}
	}
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
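/* Illustrative restatement, not part of the driver: the Table 28B-3 pause
 * resolution implemented above, folded into a pure function of the two
 * (already normalized) advertisement words.  It returns the same value
 * the if/else chain assigns to bp->flow_ctrl.
 */
static inline u32 bnx2_example_pause_resolve(u32 local_adv, u32 remote_adv)
{
	if ((local_adv & ADVERTISE_PAUSE_CAP) &&
	    (remote_adv & ADVERTISE_PAUSE_CAP))
		return FLOW_CTRL_TX | FLOW_CTRL_RX;	/* symmetric PAUSE */

	if ((local_adv & ADVERTISE_PAUSE_CAP) &&
	    (local_adv & ADVERTISE_PAUSE_ASYM) &&
	    (remote_adv & ADVERTISE_PAUSE_ASYM))
		return FLOW_CTRL_RX;	/* link partner sends, we receive */

	if ((local_adv & ADVERTISE_PAUSE_ASYM) &&
	    (remote_adv & ADVERTISE_PAUSE_CAP) &&
	    (remote_adv & ADVERTISE_PAUSE_ASYM))
		return FLOW_CTRL_TX;	/* we send, link partner receives */

	return 0;
}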
static void
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
}

static void
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}
}

static void
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	bp->link_up = 1;
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) == CHIP_NUM_5708) {
				val |= BNX2_EMAC_MODE_PORT_MII_10;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					       BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			do {
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				udelay(100);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}

static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}

static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
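/* Worked example, not part of the driver: the free-command encoding used
 * just above places the mbuf value both at bit 9 and at bit 0 of the word
 * written to BNX2_RBUF_FW_BUF_FREE, with the lowest bit set.  For
 * val = 0x22:
 *
 *	(0x22 << 9) | 0x22 | 1  =  0x4400 | 0x22 | 1  =  0x4423
 */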
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
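/* Worked example, not part of the driver: for MAC address 00:10:18:2a:bc:de
 * the two writes above produce
 *
 *	BNX2_EMAC_MAC_MATCH0 = 0x00000010	(bytes 0-1 in the low 16 bits)
 *	BNX2_EMAC_MAC_MATCH1 = 0x182abcde	(bytes 2-5)
 */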
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
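/* Worked example, not part of the driver: the buffer descriptor carries the
 * DMA address as two 32-bit halves, so mapping = 0x0000000123456789ULL is
 * stored as
 *
 *	rx_bd_haddr_hi = 0x00000001;
 *	rx_bd_haddr_lo = 0x23456789;
 */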
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
			 STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
			 STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500.
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;
		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	return rx_pkt;
}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    ((sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE) != 0))
		return 1;

	return 0;
}
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((bp->status_blk->status_attn_bits &
	     STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	     STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
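/* Illustrative sketch, not part of the driver: how the multicast loop above
 * turns the little-endian CRC of an address into a (register, bit) pair in
 * the 8 x 32-bit EMAC hash filter.
 */
static inline void bnx2_example_mc_hash(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bit = crc & 0xff;		/* low 8 bits select 1 of 256 slots */

	*regidx = (bit & 0xe0) >> 5;	/* top 3 of those pick the register */
	*bitpos = bit & 0x1f;		/* low 5 pick the bit within it;
					 * e.g. crc = 0x4d -> reg 2, bit 13 */
}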
#define FW_BUF_SIZE	0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "decompression.\n", bp->dev->name);
	return -ENOMEM;
}

static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}

static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
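/* Note, not part of the driver: the checks above rely on the gzip member
 * layout -- a fixed 10-byte header (magic 0x1f 0x8b, method byte 8 ==
 * Z_DEFLATED, flags, mtime, xfl, os), optionally followed by a
 * NUL-terminated file name when the FNAME flag (bit 3) is set, then the
 * raw deflate stream.  That is why inflateInit2() is called with
 * -MAX_WBITS: the negative window size tells zlib there is no zlib
 * wrapper around the deflate data.
 */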
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
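/* Note, not part of the driver: each RV2P instruction is 64 bits wide and
 * is loaded as a high/low pair of 32-bit register writes, which is why the
 * loop above advances i by 8 bytes per iteration and uses i / 8 as the
 * instruction-word address in the ADDR_CMD register.
 */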
static void
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);
}
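/* Note, not part of the driver: fw->text_addr and friends are link-time
 * addresses in the on-chip CPU's MIPS view.  Subtracting
 * cpu_reg->mips_view_base and adding cpu_reg->spad_base rebases each
 * section into the scratchpad window that the host reaches through
 * REG_WR_IND(), so the same firmware image can be placed for any of the
 * four processors below just by swapping the cpu_reg block.
 */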
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2526 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2530 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2536 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2537 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2538 PCI_PM_CTRL_PME_STATUS);
2540 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2541 /* delay required during transition out of D3hot */
2542 msleep(20);
2544 val = REG_RD(bp, BNX2_EMAC_MODE);
2545 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2546 val &= ~BNX2_EMAC_MODE_MPKT;
2547 REG_WR(bp, BNX2_EMAC_MODE, val);
2549 val = REG_RD(bp, BNX2_RPM_CONFIG);
2550 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2551 REG_WR(bp, BNX2_RPM_CONFIG, val);
2562 autoneg = bp->autoneg;
2563 advertising = bp->advertising;
2565 bp->autoneg = AUTONEG_SPEED;
2566 bp->advertising = ADVERTISED_10baseT_Half |
2567 ADVERTISED_10baseT_Full |
2568 ADVERTISED_100baseT_Half |
2569 ADVERTISED_100baseT_Full |
2570 ADVERTISED_Autoneg;
2572 bnx2_setup_copper_phy(bp);
2574 bp->autoneg = autoneg;
2575 bp->advertising = advertising;
2577 bnx2_set_mac_addr(bp);
2579 val = REG_RD(bp, BNX2_EMAC_MODE);
2581 /* Enable port mode. */
2582 val &= ~BNX2_EMAC_MODE_PORT;
2583 val |= BNX2_EMAC_MODE_PORT_MII |
2584 BNX2_EMAC_MODE_MPKT_RCVD |
2585 BNX2_EMAC_MODE_ACPI_RCVD |
2586 BNX2_EMAC_MODE_MPKT;
2588 REG_WR(bp, BNX2_EMAC_MODE, val);
2590 /* receive all multicast */
2591 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2592 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2593 0xffffffff);
2595 REG_WR(bp, BNX2_EMAC_RX_MODE,
2596 BNX2_EMAC_RX_MODE_SORT_MODE);
2598 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2599 BNX2_RPM_SORT_USER0_MC_EN;
2600 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2601 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2602 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2603 BNX2_RPM_SORT_USER0_ENA);
2605 /* Need to enable EMAC and RPM for WOL. */
2606 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2607 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2608 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2609 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2611 val = REG_RD(bp, BNX2_RPM_CONFIG);
2612 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2613 REG_WR(bp, BNX2_RPM_CONFIG, val);
2615 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2617 else
2618 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2621 if (!(bp->flags & NO_WOL_FLAG))
2622 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2624 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2625 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2626 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2635 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2637 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2640 /* No more memory access after this point until
2641 * device is brought back to D0.
2642 */
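2649 /* Arbitrate with the other on-chip agents for the NVRAM interface;
2650  * the grant is confirmed by the ARB2 bit, polled up to
2651  * NVRAM_TIMEOUT_COUNT times before giving up.
2652  */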
2653 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2658 /* Request access to the flash interface. */
2659 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2660 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2661 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2662 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2668 if (j >= NVRAM_TIMEOUT_COUNT)
2675 bnx2_release_nvram_lock(struct bnx2 *bp)
2680 /* Relinquish nvram interface. */
2681 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2683 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2684 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2685 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2691 if (j >= NVRAM_TIMEOUT_COUNT)
2699 bnx2_enable_nvram_write(struct bnx2 *bp)
2703 val = REG_RD(bp, BNX2_MISC_CFG);
2704 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2706 if (!bp->flash_info->buffered) {
2709 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2710 REG_WR(bp, BNX2_NVM_COMMAND,
2711 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2713 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2716 val = REG_RD(bp, BNX2_NVM_COMMAND);
2717 if (val & BNX2_NVM_COMMAND_DONE)
2721 if (j >= NVRAM_TIMEOUT_COUNT)
2728 bnx2_disable_nvram_write(struct bnx2 *bp)
2732 val = REG_RD(bp, BNX2_MISC_CFG);
2733 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2738 bnx2_enable_nvram_access(struct bnx2 *bp)
2742 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2743 /* Enable both bits, even on read. */
2744 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2745 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2749 bnx2_disable_nvram_access(struct bnx2 *bp)
2753 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2754 /* Disable both bits, even after read. */
2755 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2756 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2757 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2761 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2766 if (bp->flash_info->buffered)
2767 /* Buffered flash, no erase needed */
2770 /* Build an erase command */
2771 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2772 BNX2_NVM_COMMAND_DOIT;
2774 /* Need to clear DONE bit separately. */
2775 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2777 /* Address of the NVRAM page to erase. */
2778 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2780 /* Issue an erase command. */
2781 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2783 /* Wait for completion. */
2784 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2789 val = REG_RD(bp, BNX2_NVM_COMMAND);
2790 if (val & BNX2_NVM_COMMAND_DONE)
2794 if (j >= NVRAM_TIMEOUT_COUNT)
2795 return -EBUSY;
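2797 /* Read one 32-bit word from NVRAM.  cmd_flags carries the FIRST and
2798  * LAST framing bits so that multi-word transfers keep the device
2799  * selected for the whole burst.
2800  */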
2801 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2806 /* Build the command word. */
2807 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2809 /* Convert the offset to the page-addressed format used by buffered flash. */
2810 if (bp->flash_info->buffered) {
2811 offset = ((offset / bp->flash_info->page_size) <<
2812 bp->flash_info->page_bits) +
2813 (offset % bp->flash_info->page_size);
2816 /* Need to clear DONE bit separately. */
2817 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2819 /* Address of the NVRAM to read from. */
2820 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2822 /* Issue a read command. */
2823 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2825 /* Wait for completion. */
2826 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2831 val = REG_RD(bp, BNX2_NVM_COMMAND);
2832 if (val & BNX2_NVM_COMMAND_DONE) {
2833 val = REG_RD(bp, BNX2_NVM_READ);
2835 val = be32_to_cpu(val);
2836 memcpy(ret_val, &val, 4);
2840 if (j >= NVRAM_TIMEOUT_COUNT)
2848 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2853 /* Build the command word. */
2854 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2856 /* Convert the offset to the page-addressed format used by buffered flash. */
2857 if (bp->flash_info->buffered) {
2858 offset = ((offset / bp->flash_info->page_size) <<
2859 bp->flash_info->page_bits) +
2860 (offset % bp->flash_info->page_size);
2863 /* Need to clear DONE bit separately. */
2864 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2866 memcpy(&val32, val, 4);
2867 val32 = cpu_to_be32(val32);
2869 /* Write the data. */
2870 REG_WR(bp, BNX2_NVM_WRITE, val32);
2872 /* Address of the NVRAM to write to. */
2873 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2875 /* Issue the write command. */
2876 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2878 /* Wait for completion. */
2879 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2882 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2885 if (j >= NVRAM_TIMEOUT_COUNT)
2886 return -EBUSY;
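2888 /* Identify the attached flash/EEPROM from the NVM_CFG1 strapping
2889  * bits and point bp->flash_info at the matching flash_table entry,
2890  * reprogramming the interface if the straps were remapped.
2891  */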
2892 bnx2_init_nvram(struct bnx2 *bp)
2895 int j, entry_count, rc;
2896 struct flash_spec *flash;
2898 /* Determine the selected interface. */
2899 val = REG_RD(bp, BNX2_NVM_CFG1);
2901 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2904 if (val & 0x40000000) {
2906 /* Flash interface has been reconfigured */
2907 for (j = 0, flash = &flash_table[0]; j < entry_count;
2909 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2910 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2911 bp->flash_info = flash;
2918 /* Strapping has not yet been reconfigured */
2920 if (val & (1 << 23))
2921 mask = FLASH_BACKUP_STRAP_MASK;
2923 mask = FLASH_STRAP_MASK;
2925 for (j = 0, flash = &flash_table[0]; j < entry_count;
2928 if ((val & mask) == (flash->strapping & mask)) {
2929 bp->flash_info = flash;
2931 /* Request access to the flash interface. */
2932 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2935 /* Enable access to flash interface */
2936 bnx2_enable_nvram_access(bp);
2938 /* Reconfigure the flash interface */
2939 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2940 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2941 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2942 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2944 /* Disable access to flash interface */
2945 bnx2_disable_nvram_access(bp);
2946 bnx2_release_nvram_lock(bp);
2951 } /* if (val & 0x40000000) */
2953 if (j == entry_count) {
2954 bp->flash_info = NULL;
2955 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2959 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2960 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2961 if (val)
2962 bp->flash_size = val;
2963 else
2964 bp->flash_size = bp->flash_info->total_size;
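2966 /* Read an arbitrary byte range from NVRAM.  The hardware performs
2967  * aligned 32-bit accesses only, so unaligned head and tail bytes are
2968  * staged through a small bounce buffer.
2969  */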
2970 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2974 u32 cmd_flags, offset32, len32, extra;
2979 /* Request access to the flash interface. */
2980 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2983 /* Enable access to flash interface */
2984 bnx2_enable_nvram_access(bp);
2997 pre_len = 4 - (offset & 3);
2999 if (pre_len >= len32) {
3000 pre_len = len32;
3001 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3002 BNX2_NVM_COMMAND_LAST;
3004 else
3005 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3008 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3013 memcpy(ret_buf, buf + (offset & 3), pre_len);
3020 extra = 4 - (len32 & 3);
3021 len32 = (len32 + 4) & ~3;
3027 if (cmd_flags)
3028 cmd_flags = BNX2_NVM_COMMAND_LAST;
3029 else
3030 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3031 BNX2_NVM_COMMAND_LAST;
3033 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3035 memcpy(ret_buf, buf, 4 - extra);
3037 else if (len32 > 0) {
3040 /* Read the first word. */
3044 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3046 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3048 /* Advance to the next dword. */
3053 while (len32 > 4 && rc == 0) {
3054 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3056 /* Advance to the next dword. */
3065 cmd_flags = BNX2_NVM_COMMAND_LAST;
3066 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3068 memcpy(ret_buf, buf, 4 - extra);
3071 /* Disable access to flash interface */
3072 bnx2_disable_nvram_access(bp);
3074 bnx2_release_nvram_lock(bp);
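3076 /* Write an arbitrary byte range to NVRAM one page at a time.  For
3077  * non-buffered parts each page is saved to flash_buffer, erased, and
3078  * rewritten with the new bytes merged in.
3079  */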
3080 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3083 u32 written, offset32, len32;
3084 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3086 int align_start, align_end;
3091 align_start = align_end = 0;
3093 if ((align_start = (offset32 & 3))) {
3095 len32 += align_start;
3096 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3101 if ((len32 > 4) || !align_start) {
3102 align_end = 4 - (len32 & 3);
3104 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3111 if (align_start || align_end) {
3112 buf = kmalloc(len32, GFP_KERNEL);
3116 memcpy(buf, start, 4);
3119 memcpy(buf + len32 - 4, end, 4);
3121 memcpy(buf + align_start, data_buf, buf_size);
3124 if (bp->flash_info->buffered == 0) {
3125 flash_buffer = kmalloc(264, GFP_KERNEL);
3126 if (flash_buffer == NULL) {
3128 goto nvram_write_end;
3133 while ((written < len32) && (rc == 0)) {
3134 u32 page_start, page_end, data_start, data_end;
3135 u32 addr, cmd_flags;
3138 /* Find the page_start addr */
3139 page_start = offset32 + written;
3140 page_start -= (page_start % bp->flash_info->page_size);
3141 /* Find the page_end addr */
3142 page_end = page_start + bp->flash_info->page_size;
3143 /* Find the data_start addr */
3144 data_start = (written == 0) ? offset32 : page_start;
3145 /* Find the data_end addr */
3146 data_end = (page_end > offset32 + len32) ?
3147 (offset32 + len32) : page_end;
3149 /* Request access to the flash interface. */
3150 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3151 goto nvram_write_end;
3153 /* Enable access to flash interface */
3154 bnx2_enable_nvram_access(bp);
3156 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3157 if (bp->flash_info->buffered == 0) {
3160 /* Read the whole page into the buffer
3161 * (non-buffer flash only) */
3162 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3163 if (j == (bp->flash_info->page_size - 4)) {
3164 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3166 rc = bnx2_nvram_read_dword(bp,
3172 goto nvram_write_end;
3178 /* Enable writes to flash interface (unlock write-protect) */
3179 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3180 goto nvram_write_end;
3182 /* Erase the page */
3183 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3184 goto nvram_write_end;
3186 /* Re-enable the write again for the actual write */
3187 bnx2_enable_nvram_write(bp);
3189 /* Loop to write back the buffer data from page_start to
3190 * data_start */
3192 if (bp->flash_info->buffered == 0) {
3193 for (addr = page_start; addr < data_start;
3194 addr += 4, i += 4) {
3196 rc = bnx2_nvram_write_dword(bp, addr,
3197 &flash_buffer[i], cmd_flags);
3200 goto nvram_write_end;
3206 /* Loop to write the new data from data_start to data_end */
3207 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3208 if ((addr == page_end - 4) ||
3209 ((bp->flash_info->buffered) &&
3210 (addr == data_end - 4))) {
3212 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3214 rc = bnx2_nvram_write_dword(bp, addr, buf,
3218 goto nvram_write_end;
3224 /* Loop to write back the buffer data from data_end
3225 * to page_end */
3226 if (bp->flash_info->buffered == 0) {
3227 for (addr = data_end; addr < page_end;
3228 addr += 4, i += 4) {
3230 if (addr == page_end-4) {
3231 cmd_flags = BNX2_NVM_COMMAND_LAST;
3233 rc = bnx2_nvram_write_dword(bp, addr,
3234 &flash_buffer[i], cmd_flags);
3237 goto nvram_write_end;
3243 /* Disable writes to flash interface (lock write-protect) */
3244 bnx2_disable_nvram_write(bp);
3246 /* Disable access to flash interface */
3247 bnx2_disable_nvram_access(bp);
3248 bnx2_release_nvram_lock(bp);
3250 /* Increment written */
3251 written += data_end - data_start;
3255 if (bp->flash_info->buffered == 0)
3256 kfree(flash_buffer);
3258 if (align_start || align_end)
3259 kfree(buf);
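3260 /* Reset the chip core.  The firmware is notified before and after,
3261  * and a driver signature in shared memory marks this as a soft reset
3262  * rather than a power-on reset.
3263  */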
3264 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3269 /* Wait for the current PCI transaction to complete before
3270 * issuing a reset. */
3271 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3272 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3273 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3274 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3275 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3276 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3279 /* Wait for the firmware to tell us it is ok to issue a reset. */
3280 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3282 /* Deposit a driver reset signature so the firmware knows that
3283 * this is a soft reset. */
3284 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3285 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3287 /* Do a dummy read to force the chip to complete all current transactions
3288 * before we issue a reset. */
3289 val = REG_RD(bp, BNX2_MISC_ID);
3291 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3292 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3293 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3296 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3298 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3299 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3302 /* Reset takes approximately 30 usec */
3303 for (i = 0; i < 10; i++) {
3304 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3305 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3306 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3312 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3313 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3314 printk(KERN_ERR PFX "Chip reset did not complete\n");
3318 /* Make sure byte swapping is properly configured. */
3319 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3320 if (val != 0x01020304) {
3321 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3325 /* Wait for the firmware to finish its initialization. */
3326 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3330 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3331 /* Adjust the voltage regulator two steps lower. The default
3332 * of this register is 0x0000000e. */
3333 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3335 /* Remove bad rbuf memory from the free pool. */
3336 rc = bnx2_alloc_bad_rbuf(bp);
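3339 /* Post-reset hardware setup: DMA byte swapping, on-chip CPUs, NVRAM
3340  * detection, MTU and host coalescing parameters, finishing with a
3341  * firmware handshake and the global block enables.
3342  */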
3343 bnx2_init_chip(struct bnx2 *bp)
3348 /* Make sure the interrupt is not active. */
3349 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3351 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3352 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3354 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3356 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3357 DMA_READ_CHANS << 12 |
3358 DMA_WRITE_CHANS << 16;
3360 val |= (0x2 << 20) | (1 << 11);
3362 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3363 val |= (1 << 23);
3365 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3366 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3367 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3369 REG_WR(bp, BNX2_DMA_CONFIG, val);
3371 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3372 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3373 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3374 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3377 if (bp->flags & PCIX_FLAG) {
3380 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3382 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3383 val16 & ~PCI_X_CMD_ERO);
3386 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3387 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3388 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3389 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3391 /* Initialize context mapping and zero out the quick contexts. The
3392 * context block must have already been enabled. */
3393 bnx2_init_context(bp);
3395 if ((rc = bnx2_init_cpus(bp)) != 0)
3398 bnx2_init_nvram(bp);
3400 bnx2_set_mac_addr(bp);
3402 val = REG_RD(bp, BNX2_MQ_CONFIG);
3403 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3404 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3405 REG_WR(bp, BNX2_MQ_CONFIG, val);
3407 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3408 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3409 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3411 val = (BCM_PAGE_BITS - 8) << 24;
3412 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3414 /* Configure page size. */
3415 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3416 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3417 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3418 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3420 val = bp->mac_addr[0] +
3421 (bp->mac_addr[1] << 8) +
3422 (bp->mac_addr[2] << 16) +
3423 bp->mac_addr[3] +
3424 (bp->mac_addr[4] << 8) +
3425 (bp->mac_addr[5] << 16);
3426 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3428 /* Program the MTU. Also include 4 bytes for CRC32. */
3429 val = bp->dev->mtu + ETH_HLEN + 4;
3430 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3431 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3432 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3434 bp->last_status_idx = 0;
3435 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3437 /* Set up how to generate a link change interrupt. */
3438 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3440 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3441 (u64) bp->status_blk_mapping & 0xffffffff);
3442 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3444 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3445 (u64) bp->stats_blk_mapping & 0xffffffff);
3446 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3447 (u64) bp->stats_blk_mapping >> 32);
3449 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3450 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3452 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3453 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3455 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3456 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3458 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3460 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3462 REG_WR(bp, BNX2_HC_COM_TICKS,
3463 (bp->com_ticks_int << 16) | bp->com_ticks);
3465 REG_WR(bp, BNX2_HC_CMD_TICKS,
3466 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3468 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3469 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3471 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3472 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3473 else
3474 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3475 BNX2_HC_CONFIG_TX_TMR_MODE |
3476 BNX2_HC_CONFIG_COLLECT_STATS);
3479 /* Clear internal stats counters. */
3480 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3482 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3484 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3485 BNX2_PORT_FEATURE_ASF_ENABLED)
3486 bp->flags |= ASF_ENABLE_FLAG;
3488 /* Initialize the receive filter. */
3489 bnx2_set_rx_mode(bp->dev);
3491 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3494 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3495 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3499 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
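3502 /* Set up the single TX ring: the last BD chains back to the start of
3503  * the ring, and the ring base address is programmed into the TX
3504  * context in on-chip memory.
3505  */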
3506 bnx2_init_tx_ring(struct bnx2 *bp)
3511 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3513 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3515 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3516 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3521 bp->tx_prod_bseq = 0;
3523 val = BNX2_L2CTX_TYPE_TYPE_L2;
3524 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3525 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3527 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3529 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3531 val = (u64) bp->tx_desc_mapping >> 32;
3532 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3534 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3535 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
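3536 /* Set up the RX rings: descriptor pages are chained circularly,
3537  * registered in the RX context, and pre-filled with skbs.
3538  */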
3539 bnx2_init_rx_ring(struct bnx2 *bp)
3543 u16 prod, ring_prod;
3546 /* 8 for CRC and VLAN */
3547 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3548 /* 8 for alignment */
3549 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3551 ring_prod = prod = bp->rx_prod = 0;
3554 bp->rx_prod_bseq = 0;
3556 for (i = 0; i < bp->rx_max_ring; i++) {
3559 rxbd = &bp->rx_desc_ring[i][0];
3560 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3561 rxbd->rx_bd_len = bp->rx_buf_use_size;
3562 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3564 if (i == (bp->rx_max_ring - 1))
3565 j = 0;
3566 else
3567 j = i + 1;
3568 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3569 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3573 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3574 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3576 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3578 val = (u64) bp->rx_desc_mapping[0] >> 32;
3579 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3581 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3582 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3584 for (i = 0; i < bp->rx_ring_size; i++) {
3585 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3588 prod = NEXT_RX_BD(prod);
3589 ring_prod = RX_RING_IDX(prod);
3593 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3595 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
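3596 /* Convert the requested ring size into a power-of-2 number of
3597  * descriptor pages.
3598  */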
3599 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3603 bp->rx_ring_size = size;
3605 while (size > MAX_RX_DESC_CNT) {
3606 size -= MAX_RX_DESC_CNT;
3609 /* round to next power of 2 */
3611 while ((max & num_rings) == 0)
3614 if (num_rings != max)
3617 bp->rx_max_ring = max;
3618 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3622 bnx2_free_tx_skbs(struct bnx2 *bp)
3626 if (bp->tx_buf_ring == NULL)
3629 for (i = 0; i < TX_DESC_CNT; ) {
3630 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3631 struct sk_buff *skb = tx_buf->skb;
3639 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3640 skb_headlen(skb), PCI_DMA_TODEVICE);
3644 last = skb_shinfo(skb)->nr_frags;
3645 for (j = 0; j < last; j++) {
3646 tx_buf = &bp->tx_buf_ring[i + j + 1];
3647 pci_unmap_page(bp->pdev,
3648 pci_unmap_addr(tx_buf, mapping),
3649 skb_shinfo(skb)->frags[j].size,
3659 bnx2_free_rx_skbs(struct bnx2 *bp)
3663 if (bp->rx_buf_ring == NULL)
3666 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3667 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3668 struct sk_buff *skb = rx_buf->skb;
3673 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3674 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3683 bnx2_free_skbs(struct bnx2 *bp)
3685 bnx2_free_tx_skbs(bp);
3686 bnx2_free_rx_skbs(bp);
3690 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3694 rc = bnx2_reset_chip(bp, reset_code);
3699 if ((rc = bnx2_init_chip(bp)) != 0)
3702 bnx2_init_tx_ring(bp);
3703 bnx2_init_rx_ring(bp);
3708 bnx2_init_nic(struct bnx2 *bp)
3712 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3721 bnx2_test_registers(struct bnx2 *bp)
3725 static const struct {
3731 { 0x006c, 0, 0x00000000, 0x0000003f },
3732 { 0x0090, 0, 0xffffffff, 0x00000000 },
3733 { 0x0094, 0, 0x00000000, 0x00000000 },
3735 { 0x0404, 0, 0x00003f00, 0x00000000 },
3736 { 0x0418, 0, 0x00000000, 0xffffffff },
3737 { 0x041c, 0, 0x00000000, 0xffffffff },
3738 { 0x0420, 0, 0x00000000, 0x80ffffff },
3739 { 0x0424, 0, 0x00000000, 0x00000000 },
3740 { 0x0428, 0, 0x00000000, 0x00000001 },
3741 { 0x0450, 0, 0x00000000, 0x0000ffff },
3742 { 0x0454, 0, 0x00000000, 0xffffffff },
3743 { 0x0458, 0, 0x00000000, 0xffffffff },
3745 { 0x0808, 0, 0x00000000, 0xffffffff },
3746 { 0x0854, 0, 0x00000000, 0xffffffff },
3747 { 0x0868, 0, 0x00000000, 0x77777777 },
3748 { 0x086c, 0, 0x00000000, 0x77777777 },
3749 { 0x0870, 0, 0x00000000, 0x77777777 },
3750 { 0x0874, 0, 0x00000000, 0x77777777 },
3752 { 0x0c00, 0, 0x00000000, 0x00000001 },
3753 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3754 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3756 { 0x1000, 0, 0x00000000, 0x00000001 },
3757 { 0x1004, 0, 0x00000000, 0x000f0001 },
3759 { 0x1408, 0, 0x01c00800, 0x00000000 },
3760 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3761 { 0x14a8, 0, 0x00000000, 0x000001ff },
3762 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3763 { 0x14b0, 0, 0x00000002, 0x00000001 },
3764 { 0x14b8, 0, 0x00000000, 0x00000000 },
3765 { 0x14c0, 0, 0x00000000, 0x00000009 },
3766 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3767 { 0x14cc, 0, 0x00000000, 0x00000001 },
3768 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3770 { 0x1800, 0, 0x00000000, 0x00000001 },
3771 { 0x1804, 0, 0x00000000, 0x00000003 },
3773 { 0x2800, 0, 0x00000000, 0x00000001 },
3774 { 0x2804, 0, 0x00000000, 0x00003f01 },
3775 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3776 { 0x2810, 0, 0xffff0000, 0x00000000 },
3777 { 0x2814, 0, 0xffff0000, 0x00000000 },
3778 { 0x2818, 0, 0xffff0000, 0x00000000 },
3779 { 0x281c, 0, 0xffff0000, 0x00000000 },
3780 { 0x2834, 0, 0xffffffff, 0x00000000 },
3781 { 0x2840, 0, 0x00000000, 0xffffffff },
3782 { 0x2844, 0, 0x00000000, 0xffffffff },
3783 { 0x2848, 0, 0xffffffff, 0x00000000 },
3784 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3786 { 0x2c00, 0, 0x00000000, 0x00000011 },
3787 { 0x2c04, 0, 0x00000000, 0x00030007 },
3789 { 0x3c00, 0, 0x00000000, 0x00000001 },
3790 { 0x3c04, 0, 0x00000000, 0x00070000 },
3791 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3792 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3793 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3794 { 0x3c14, 0, 0x00000000, 0xffffffff },
3795 { 0x3c18, 0, 0x00000000, 0xffffffff },
3796 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3797 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3799 { 0x5004, 0, 0x00000000, 0x0000007f },
3800 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3801 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3803 { 0x5c00, 0, 0x00000000, 0x00000001 },
3804 { 0x5c04, 0, 0x00000000, 0x0003000f },
3805 { 0x5c08, 0, 0x00000003, 0x00000000 },
3806 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3807 { 0x5c10, 0, 0x00000000, 0xffffffff },
3808 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3809 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3810 { 0x5c88, 0, 0x00000000, 0x00077373 },
3811 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3813 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3814 { 0x680c, 0, 0xffffffff, 0x00000000 },
3815 { 0x6810, 0, 0xffffffff, 0x00000000 },
3816 { 0x6814, 0, 0xffffffff, 0x00000000 },
3817 { 0x6818, 0, 0xffffffff, 0x00000000 },
3818 { 0x681c, 0, 0xffffffff, 0x00000000 },
3819 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3820 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3821 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3822 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3823 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3824 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3825 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3826 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3827 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3828 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3829 { 0x684c, 0, 0xffffffff, 0x00000000 },
3830 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3831 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3832 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3833 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3834 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3835 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3837 { 0xffff, 0, 0x00000000, 0x00000000 },
3838 };
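3839 /* Write 0 and all-ones through each register's RW mask; RW bits must
3840  * latch while RO bits must keep their reset values. */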
3841 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3842 u32 offset, rw_mask, ro_mask, save_val, val;
3844 offset = (u32) reg_tbl[i].offset;
3845 rw_mask = reg_tbl[i].rw_mask;
3846 ro_mask = reg_tbl[i].ro_mask;
3848 save_val = readl(bp->regview + offset);
3850 writel(0, bp->regview + offset);
3852 val = readl(bp->regview + offset);
3853 if ((val & rw_mask) != 0) {
3857 if ((val & ro_mask) != (save_val & ro_mask)) {
3861 writel(0xffffffff, bp->regview + offset);
3863 val = readl(bp->regview + offset);
3864 if ((val & rw_mask) != rw_mask) {
3868 if ((val & ro_mask) != (save_val & ro_mask)) {
3872 writel(save_val, bp->regview + offset);
3876 writel(save_val, bp->regview + offset);
3884 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3886 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3887 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
3890 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3893 for (offset = 0; offset < size; offset += 4) {
3895 REG_WR_IND(bp, start + offset, test_pattern[i]);
3897 if (REG_RD_IND(bp, start + offset) !=
3907 bnx2_test_memory(struct bnx2 *bp)
3911 static const struct {
3915 { 0x60000, 0x4000 },
3916 { 0xa0000, 0x3000 },
3917 { 0xe0000, 0x4000 },
3918 { 0x120000, 0x4000 },
3919 { 0x1a0000, 0x4000 },
3920 { 0x160000, 0x4000 },
3924 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3925 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3926 mem_tbl[i].len)) != 0) {
3934 #define BNX2_MAC_LOOPBACK 0
3935 #define BNX2_PHY_LOOPBACK 1
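3936 /* Send one self-addressed frame through the selected loopback path
3937  * and verify it returns intact on the RX ring. */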
3938 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3940 unsigned int pkt_size, num_pkts, i;
3941 struct sk_buff *skb, *rx_skb;
3942 unsigned char *packet;
3943 u16 rx_start_idx, rx_idx;
3946 struct sw_bd *rx_buf;
3947 struct l2_fhdr *rx_hdr;
3950 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3951 bp->loopback = MAC_LOOPBACK;
3952 bnx2_set_mac_loopback(bp);
3954 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3956 bnx2_set_phy_loopback(bp);
3962 skb = netdev_alloc_skb(bp->dev, pkt_size);
3965 packet = skb_put(skb, pkt_size);
3966 memcpy(packet, bp->mac_addr, 6);
3967 memset(packet + 6, 0x0, 8);
3968 for (i = 14; i < pkt_size; i++)
3969 packet[i] = (unsigned char) (i & 0xff);
3971 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3974 REG_WR(bp, BNX2_HC_COMMAND,
3975 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3977 REG_RD(bp, BNX2_HC_COMMAND);
3980 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3984 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3986 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3987 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3988 txbd->tx_bd_mss_nbytes = pkt_size;
3989 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3992 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3993 bp->tx_prod_bseq += pkt_size;
3995 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3996 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4000 REG_WR(bp, BNX2_HC_COMMAND,
4001 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4003 REG_RD(bp, BNX2_HC_COMMAND);
4007 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4010 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4011 goto loopback_test_done;
4014 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4015 if (rx_idx != rx_start_idx + num_pkts) {
4016 goto loopback_test_done;
4019 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4020 rx_skb = rx_buf->skb;
4022 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4023 skb_reserve(rx_skb, bp->rx_offset);
4025 pci_dma_sync_single_for_cpu(bp->pdev,
4026 pci_unmap_addr(rx_buf, mapping),
4027 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4029 if (rx_hdr->l2_fhdr_status &
4030 (L2_FHDR_ERRORS_BAD_CRC |
4031 L2_FHDR_ERRORS_PHY_DECODE |
4032 L2_FHDR_ERRORS_ALIGNMENT |
4033 L2_FHDR_ERRORS_TOO_SHORT |
4034 L2_FHDR_ERRORS_GIANT_FRAME)) {
4036 goto loopback_test_done;
4039 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4040 goto loopback_test_done;
4043 for (i = 14; i < pkt_size; i++) {
4044 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4045 goto loopback_test_done;
4056 #define BNX2_MAC_LOOPBACK_FAILED 1
4057 #define BNX2_PHY_LOOPBACK_FAILED 2
4058 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4059 BNX2_PHY_LOOPBACK_FAILED)
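4060 /* Run the MAC and PHY loopbacks back to back, accumulating failure
4061  * bits for the ethtool self-test report. */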
4062 bnx2_test_loopback(struct bnx2 *bp)
4066 if (!netif_running(bp->dev))
4067 return BNX2_LOOPBACK_FAILED;
4069 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4070 spin_lock_bh(&bp->phy_lock);
4072 spin_unlock_bh(&bp->phy_lock);
4073 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4074 rc |= BNX2_MAC_LOOPBACK_FAILED;
4075 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4076 rc |= BNX2_PHY_LOOPBACK_FAILED;
4080 #define NVRAM_SIZE 0x200
4081 #define CRC32_RESIDUAL 0xdebb20e3
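4082 /* A CRC32 computed over a block that ends with its own little-endian
4083  * CRC yields this fixed residual, so each 0x100-byte block self-checks. */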
4084 bnx2_test_nvram(struct bnx2 *bp)
4086 u32 buf[NVRAM_SIZE / 4];
4087 u8 *data = (u8 *) buf;
4091 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4092 goto test_nvram_done;
4094 magic = be32_to_cpu(buf[0]);
4095 if (magic != 0x669955aa) {
4097 goto test_nvram_done;
4100 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4101 goto test_nvram_done;
4103 csum = ether_crc_le(0x100, data);
4104 if (csum != CRC32_RESIDUAL) {
4106 goto test_nvram_done;
4109 csum = ether_crc_le(0x100, data + 0x100);
4110 if (csum != CRC32_RESIDUAL) {
4119 bnx2_test_link(struct bnx2 *bp)
4123 spin_lock_bh(&bp->phy_lock);
4124 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4125 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4126 spin_unlock_bh(&bp->phy_lock);
4128 if (bmsr & BMSR_LSTATUS) {
4135 bnx2_test_intr(struct bnx2 *bp)
4140 if (!netif_running(bp->dev))
4143 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4145 /* This register is not touched during run-time. */
4146 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4147 REG_RD(bp, BNX2_HC_COMMAND);
4149 for (i = 0; i < 10; i++) {
4150 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4156 msleep_interruptible(10);
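4161 /* Periodic timer: feeds the driver pulse to the firmware and, on
4162  * 5706 SerDes parts, falls back to a forced 1 Gb link when a signal
4163  * is detected but autoneg gets no response (parallel detect).
4164  */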
4165 bnx2_timer(unsigned long data)
4167 struct bnx2 *bp = (struct bnx2 *) data;
4170 if (!netif_running(bp->dev))
4173 if (atomic_read(&bp->intr_sem) != 0)
4174 goto bnx2_restart_timer;
4176 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4177 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4179 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4181 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
4182 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
4184 spin_lock(&bp->phy_lock);
4185 if (bp->serdes_an_pending) {
4186 bp->serdes_an_pending--;
4188 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4191 bp->current_interval = bp->timer_interval;
4193 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4195 if (bmcr & BMCR_ANENABLE) {
4198 bnx2_write_phy(bp, 0x1c, 0x7c00);
4199 bnx2_read_phy(bp, 0x1c, &phy1);
4201 bnx2_write_phy(bp, 0x17, 0x0f01);
4202 bnx2_read_phy(bp, 0x15, &phy2);
4203 bnx2_write_phy(bp, 0x17, 0x0f01);
4204 bnx2_read_phy(bp, 0x15, &phy2);
4206 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4207 !(phy2 & 0x20)) { /* no CONFIG */
4209 bmcr &= ~BMCR_ANENABLE;
4210 bmcr |= BMCR_SPEED1000 |
4211 BMCR_FULLDPLX;
4212 bnx2_write_phy(bp, MII_BMCR, bmcr);
4213 bp->phy_flags |=
4214 PHY_PARALLEL_DETECT_FLAG;
4218 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4219 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4222 bnx2_write_phy(bp, 0x17, 0x0f01);
4223 bnx2_read_phy(bp, 0x15, &phy2);
4227 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4228 bmcr |= BMCR_ANENABLE;
4229 bnx2_write_phy(bp, MII_BMCR, bmcr);
4231 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4236 bp->current_interval = bp->timer_interval;
4238 spin_unlock(&bp->phy_lock);
4242 mod_timer(&bp->timer, jiffies + bp->current_interval);
4245 /* Called with rtnl_lock */
4247 bnx2_open(struct net_device *dev)
4249 struct bnx2 *bp = netdev_priv(dev);
4252 bnx2_set_power_state(bp, PCI_D0);
4253 bnx2_disable_int(bp);
4255 rc = bnx2_alloc_mem(bp);
4259 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4260 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4263 if (pci_enable_msi(bp->pdev) == 0) {
4264 bp->flags |= USING_MSI_FLAG;
4265 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4269 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4270 IRQF_SHARED, dev->name, dev);
4274 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4282 rc = bnx2_init_nic(bp);
4285 free_irq(bp->pdev->irq, dev);
4286 if (bp->flags & USING_MSI_FLAG) {
4287 pci_disable_msi(bp->pdev);
4288 bp->flags &= ~USING_MSI_FLAG;
4295 mod_timer(&bp->timer, jiffies + bp->current_interval);
4297 atomic_set(&bp->intr_sem, 0);
4299 bnx2_enable_int(bp);
4301 if (bp->flags & USING_MSI_FLAG) {
4302 /* Test MSI to make sure it is working
4303 * If MSI test fails, go back to INTx mode.
4304 */
4305 if (bnx2_test_intr(bp) != 0) {
4306 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4307 " using MSI, switching to INTx mode. Please"
4308 " report this failure to the PCI maintainer"
4309 " and include system chipset information.\n",
4312 bnx2_disable_int(bp);
4313 free_irq(bp->pdev->irq, dev);
4314 pci_disable_msi(bp->pdev);
4315 bp->flags &= ~USING_MSI_FLAG;
4317 rc = bnx2_init_nic(bp);
4320 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4321 IRQF_SHARED, dev->name, dev);
4326 del_timer_sync(&bp->timer);
4329 bnx2_enable_int(bp);
4332 if (bp->flags & USING_MSI_FLAG) {
4333 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4336 netif_start_queue(dev);
4342 bnx2_reset_task(void *data)
4344 struct bnx2 *bp = data;
4346 if (!netif_running(bp->dev))
4349 bp->in_reset_task = 1;
4350 bnx2_netif_stop(bp);
4354 atomic_set(&bp->intr_sem, 1);
4355 bnx2_netif_start(bp);
4356 bp->in_reset_task = 0;
4360 bnx2_tx_timeout(struct net_device *dev)
4362 struct bnx2 *bp = netdev_priv(dev);
4364 /* This allows the netif to be shut down gracefully before resetting */
4365 schedule_work(&bp->reset_task);
4369 /* Called with rtnl_lock */
4371 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4373 struct bnx2 *bp = netdev_priv(dev);
4375 bnx2_netif_stop(bp);
4378 bnx2_set_rx_mode(dev);
4380 bnx2_netif_start(bp);
4383 /* Called with rtnl_lock */
4385 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4387 struct bnx2 *bp = netdev_priv(dev);
4389 bnx2_netif_stop(bp);
4392 bp->vlgrp->vlan_devices[vid] = NULL;
4393 bnx2_set_rx_mode(dev);
4395 bnx2_netif_start(bp);
4399 /* Called with netif_tx_lock.
4400 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4401 * netif_wake_queue().
4402 */
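4403 /* TX fast path: map the skb and its fragments to BDs, apply checksum/TSO flags, then ring the TX mailbox. */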
4404 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4406 struct bnx2 *bp = netdev_priv(dev);
4409 struct sw_bd *tx_buf;
4410 u32 len, vlan_tag_flags, last_frag, mss;
4411 u16 prod, ring_prod;
4414 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4415 netif_stop_queue(dev);
4416 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4419 return NETDEV_TX_BUSY;
4421 len = skb_headlen(skb);
4423 ring_prod = TX_RING_IDX(prod);
4426 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4427 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4430 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4432 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4435 if ((mss = skb_shinfo(skb)->gso_size) &&
4436 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4437 u32 tcp_opt_len, ip_tcp_len;
4439 if (skb_header_cloned(skb) &&
4440 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4442 return NETDEV_TX_OK;
4445 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4446 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4449 if (skb->h.th->doff > 5) {
4450 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4452 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4454 skb->nh.iph->check = 0;
4455 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4457 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4461 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4462 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4463 (tcp_opt_len >> 2)) << 8;
4472 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4474 tx_buf = &bp->tx_buf_ring[ring_prod];
4476 pci_unmap_addr_set(tx_buf, mapping, mapping);
4478 txbd = &bp->tx_desc_ring[ring_prod];
4480 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4481 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4482 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4483 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4485 last_frag = skb_shinfo(skb)->nr_frags;
4487 for (i = 0; i < last_frag; i++) {
4488 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4490 prod = NEXT_TX_BD(prod);
4491 ring_prod = TX_RING_IDX(prod);
4492 txbd = &bp->tx_desc_ring[ring_prod];
4495 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4496 len, PCI_DMA_TODEVICE);
4497 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4500 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4501 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4502 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4503 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4506 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4508 prod = NEXT_TX_BD(prod);
4509 bp->tx_prod_bseq += skb->len;
4511 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4512 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4517 dev->trans_start = jiffies;
4519 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4520 netif_stop_queue(dev);
4521 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4522 netif_wake_queue(dev);
4525 return NETDEV_TX_OK;
4528 /* Called with rtnl_lock */
4530 bnx2_close(struct net_device *dev)
4532 struct bnx2 *bp = netdev_priv(dev);
4535 /* Calling flush_scheduled_work() may deadlock because
4536 * linkwatch_event() may be on the workqueue and it will try to get
4537 * the rtnl_lock which we are holding.
4538 */
4539 while (bp->in_reset_task)
4542 bnx2_netif_stop(bp);
4543 del_timer_sync(&bp->timer);
4544 if (bp->flags & NO_WOL_FLAG)
4545 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4546 else if (bp->wol)
4547 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4548 else
4549 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4550 bnx2_reset_chip(bp, reset_code);
4551 free_irq(bp->pdev->irq, dev);
4552 if (bp->flags & USING_MSI_FLAG) {
4553 pci_disable_msi(bp->pdev);
4554 bp->flags &= ~USING_MSI_FLAG;
4559 netif_carrier_off(bp->dev);
4560 bnx2_set_power_state(bp, PCI_D3hot);
4564 #define GET_NET_STATS64(ctr) \
4565 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4566 (unsigned long) (ctr##_lo)
4568 #define GET_NET_STATS32(ctr) \
4569 (ctr##_lo)
4571 #if (BITS_PER_LONG == 64)
4572 #define GET_NET_STATS GET_NET_STATS64
4573 #else
4574 #define GET_NET_STATS GET_NET_STATS32
4575 #endif
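4576 /* The chip keeps 64-bit counters as hi/lo register pairs; 32-bit hosts report only the low word. */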
4577 static struct net_device_stats *
4578 bnx2_get_stats(struct net_device *dev)
4580 struct bnx2 *bp = netdev_priv(dev);
4581 struct statistics_block *stats_blk = bp->stats_blk;
4582 struct net_device_stats *net_stats = &bp->net_stats;
4584 if (bp->stats_blk == NULL) {
4587 net_stats->rx_packets =
4588 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4589 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4590 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4592 net_stats->tx_packets =
4593 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4594 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4595 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4597 net_stats->rx_bytes =
4598 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4600 net_stats->tx_bytes =
4601 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4603 net_stats->multicast =
4604 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4606 net_stats->collisions =
4607 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4609 net_stats->rx_length_errors =
4610 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4611 stats_blk->stat_EtherStatsOverrsizePkts);
4613 net_stats->rx_over_errors =
4614 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4616 net_stats->rx_frame_errors =
4617 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4619 net_stats->rx_crc_errors =
4620 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4622 net_stats->rx_errors = net_stats->rx_length_errors +
4623 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4624 net_stats->rx_crc_errors;
4626 net_stats->tx_aborted_errors =
4627 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4628 stats_blk->stat_Dot3StatsLateCollisions);
4630 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4631 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4632 net_stats->tx_carrier_errors = 0;
4634 net_stats->tx_carrier_errors =
4636 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4639 net_stats->tx_errors =
4641 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4643 net_stats->tx_aborted_errors +
4644 net_stats->tx_carrier_errors;
4646 net_stats->rx_missed_errors =
4647 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4648 stats_blk->stat_FwRxDrop);
4653 /* All ethtool functions called with rtnl_lock */
4656 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4658 struct bnx2 *bp = netdev_priv(dev);
4660 cmd->supported = SUPPORTED_Autoneg;
4661 if (bp->phy_flags & PHY_SERDES_FLAG) {
4662 cmd->supported |= SUPPORTED_1000baseT_Full |
4665 cmd->port = PORT_FIBRE;
4668 cmd->supported |= SUPPORTED_10baseT_Half |
4669 SUPPORTED_10baseT_Full |
4670 SUPPORTED_100baseT_Half |
4671 SUPPORTED_100baseT_Full |
4672 SUPPORTED_1000baseT_Full |
4675 cmd->port = PORT_TP;
4678 cmd->advertising = bp->advertising;
4680 if (bp->autoneg & AUTONEG_SPEED) {
4681 cmd->autoneg = AUTONEG_ENABLE;
4684 cmd->autoneg = AUTONEG_DISABLE;
4687 if (netif_carrier_ok(dev)) {
4688 cmd->speed = bp->line_speed;
4689 cmd->duplex = bp->duplex;
4696 cmd->transceiver = XCVR_INTERNAL;
4697 cmd->phy_address = bp->phy_addr;
4703 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4705 struct bnx2 *bp = netdev_priv(dev);
4706 u8 autoneg = bp->autoneg;
4707 u8 req_duplex = bp->req_duplex;
4708 u16 req_line_speed = bp->req_line_speed;
4709 u32 advertising = bp->advertising;
4711 if (cmd->autoneg == AUTONEG_ENABLE) {
4712 autoneg |= AUTONEG_SPEED;
4714 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4716 /* allow advertising 1 speed */
4717 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4718 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4719 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4720 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4722 if (bp->phy_flags & PHY_SERDES_FLAG)
4725 advertising = cmd->advertising;
4728 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4729 advertising = cmd->advertising;
4731 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4735 if (bp->phy_flags & PHY_SERDES_FLAG) {
4736 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4739 advertising = ETHTOOL_ALL_COPPER_SPEED;
4742 advertising |= ADVERTISED_Autoneg;
4745 if (bp->phy_flags & PHY_SERDES_FLAG) {
4746 if ((cmd->speed != SPEED_1000) ||
4747 (cmd->duplex != DUPLEX_FULL)) {
4751 else if (cmd->speed == SPEED_1000) {
4754 autoneg &= ~AUTONEG_SPEED;
4755 req_line_speed = cmd->speed;
4756 req_duplex = cmd->duplex;
4760 bp->autoneg = autoneg;
4761 bp->advertising = advertising;
4762 bp->req_line_speed = req_line_speed;
4763 bp->req_duplex = req_duplex;
4765 spin_lock_bh(&bp->phy_lock);
4769 spin_unlock_bh(&bp->phy_lock);
4775 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4777 struct bnx2 *bp = netdev_priv(dev);
4779 strcpy(info->driver, DRV_MODULE_NAME);
4780 strcpy(info->version, DRV_MODULE_VERSION);
4781 strcpy(info->bus_info, pci_name(bp->pdev));
4782 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4783 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4784 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4785 info->fw_version[1] = info->fw_version[3] = '.';
4786 info->fw_version[5] = 0;
4789 #define BNX2_REGDUMP_LEN (32 * 1024)
4792 bnx2_get_regs_len(struct net_device *dev)
4794 return BNX2_REGDUMP_LEN;
4798 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4800 u32 *p = _p, i, offset;
4802 struct bnx2 *bp = netdev_priv(dev);
4803 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4804 0x0800, 0x0880, 0x0c00, 0x0c10,
4805 0x0c30, 0x0d08, 0x1000, 0x101c,
4806 0x1040, 0x1048, 0x1080, 0x10a4,
4807 0x1400, 0x1490, 0x1498, 0x14f0,
4808 0x1500, 0x155c, 0x1580, 0x15dc,
4809 0x1600, 0x1658, 0x1680, 0x16d8,
4810 0x1800, 0x1820, 0x1840, 0x1854,
4811 0x1880, 0x1894, 0x1900, 0x1984,
4812 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4813 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4814 0x2000, 0x2030, 0x23c0, 0x2400,
4815 0x2800, 0x2820, 0x2830, 0x2850,
4816 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4817 0x3c00, 0x3c94, 0x4000, 0x4010,
4818 0x4080, 0x4090, 0x43c0, 0x4458,
4819 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4820 0x4fc0, 0x5010, 0x53c0, 0x5444,
4821 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4822 0x5fc0, 0x6000, 0x6400, 0x6428,
4823 0x6800, 0x6848, 0x684c, 0x6860,
4824 0x6888, 0x6910, 0x8000 };
4828 memset(p, 0, BNX2_REGDUMP_LEN);
4830 if (!netif_running(bp->dev))
4834 offset = reg_boundaries[0];
4836 while (offset < BNX2_REGDUMP_LEN) {
4837 *p++ = REG_RD(bp, offset);
4839 if (offset == reg_boundaries[i + 1]) {
4840 offset = reg_boundaries[i + 2];
4841 p = (u32 *) (orig_p + offset);
4848 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4850 struct bnx2 *bp = netdev_priv(dev);
4852 if (bp->flags & NO_WOL_FLAG) {
4857 wol->supported = WAKE_MAGIC;
4859 wol->wolopts = WAKE_MAGIC;
4863 memset(&wol->sopass, 0, sizeof(wol->sopass));
4867 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4869 struct bnx2 *bp = netdev_priv(dev);
4871 if (wol->wolopts & ~WAKE_MAGIC)
4874 if (wol->wolopts & WAKE_MAGIC) {
4875 if (bp->flags & NO_WOL_FLAG)
4887 bnx2_nway_reset(struct net_device *dev)
4889 struct bnx2 *bp = netdev_priv(dev);
4892 if (!(bp->autoneg & AUTONEG_SPEED)) {
4896 spin_lock_bh(&bp->phy_lock);
4898 /* Force a link down that is visible to the link partner */
4899 if (bp->phy_flags & PHY_SERDES_FLAG) {
4900 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4901 spin_unlock_bh(&bp->phy_lock);
4905 spin_lock_bh(&bp->phy_lock);
4906 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4907 bp->current_interval = SERDES_AN_TIMEOUT;
4908 bp->serdes_an_pending = 1;
4909 mod_timer(&bp->timer, jiffies + bp->current_interval);
4913 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4914 bmcr &= ~BMCR_LOOPBACK;
4915 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4917 spin_unlock_bh(&bp->phy_lock);
4923 bnx2_get_eeprom_len(struct net_device *dev)
4925 struct bnx2 *bp = netdev_priv(dev);
4927 if (bp->flash_info == NULL)
4930 return (int) bp->flash_size;
4934 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4937 struct bnx2 *bp = netdev_priv(dev);
4940 /* parameters already validated in ethtool_get_eeprom */
4942 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4948 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4951 struct bnx2 *bp = netdev_priv(dev);
4954 /* parameters already validated in ethtool_set_eeprom */
4956 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4962 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4964 struct bnx2 *bp = netdev_priv(dev);
4966 memset(coal, 0, sizeof(struct ethtool_coalesce));
4968 coal->rx_coalesce_usecs = bp->rx_ticks;
4969 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4970 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4971 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4973 coal->tx_coalesce_usecs = bp->tx_ticks;
4974 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4975 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4976 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4978 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4984 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4986 struct bnx2 *bp = netdev_priv(dev);
4988 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4989 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4991 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4992 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4994 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4995 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4997 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4998 if (bp->rx_quick_cons_trip_int > 0xff)
4999 bp->rx_quick_cons_trip_int = 0xff;
5001 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5002 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5004 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5005 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5007 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5008 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5010 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5011 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5012 0xff;
5014 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5015 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5016 bp->stats_ticks &= 0xffff00;
5018 if (netif_running(bp->dev)) {
5019 bnx2_netif_stop(bp);
5021 bnx2_netif_start(bp);
5028 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5030 struct bnx2 *bp = netdev_priv(dev);
5032 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5033 ering->rx_mini_max_pending = 0;
5034 ering->rx_jumbo_max_pending = 0;
5036 ering->rx_pending = bp->rx_ring_size;
5037 ering->rx_mini_pending = 0;
5038 ering->rx_jumbo_pending = 0;
5040 ering->tx_max_pending = MAX_TX_DESC_CNT;
5041 ering->tx_pending = bp->tx_ring_size;
5045 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5047 struct bnx2 *bp = netdev_priv(dev);
5049 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5050 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5051 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5055 if (netif_running(bp->dev)) {
5056 bnx2_netif_stop(bp);
5057 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5062 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5063 bp->tx_ring_size = ering->tx_pending;
5065 if (netif_running(bp->dev)) {
5068 rc = bnx2_alloc_mem(bp);
5072 bnx2_netif_start(bp);
5079 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5081 struct bnx2 *bp = netdev_priv(dev);
5083 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5084 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5085 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5089 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5091 struct bnx2 *bp = netdev_priv(dev);
5093 bp->req_flow_ctrl = 0;
5094 if (epause->rx_pause)
5095 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5096 if (epause->tx_pause)
5097 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5099 if (epause->autoneg) {
5100 bp->autoneg |= AUTONEG_FLOW_CTRL;
5103 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5106 spin_lock_bh(&bp->phy_lock);
5110 spin_unlock_bh(&bp->phy_lock);
5116 bnx2_get_rx_csum(struct net_device *dev)
5118 struct bnx2 *bp = netdev_priv(dev);
5124 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5126 struct bnx2 *bp = netdev_priv(dev);
5133 bnx2_set_tso(struct net_device *dev, u32 data)
5135 if (data)
5136 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5137 else
5138 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5142 #define BNX2_NUM_STATS 46
5145 char string[ETH_GSTRING_LEN];
5146 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5148 { "rx_error_bytes" },
5150 { "tx_error_bytes" },
5151 { "rx_ucast_packets" },
5152 { "rx_mcast_packets" },
5153 { "rx_bcast_packets" },
5154 { "tx_ucast_packets" },
5155 { "tx_mcast_packets" },
5156 { "tx_bcast_packets" },
5157 { "tx_mac_errors" },
5158 { "tx_carrier_errors" },
5159 { "rx_crc_errors" },
5160 { "rx_align_errors" },
5161 { "tx_single_collisions" },
5162 { "tx_multi_collisions" },
5164 { "tx_excess_collisions" },
5165 { "tx_late_collisions" },
5166 { "tx_total_collisions" },
5169 { "rx_undersize_packets" },
5170 { "rx_oversize_packets" },
5171 { "rx_64_byte_packets" },
5172 { "rx_65_to_127_byte_packets" },
5173 { "rx_128_to_255_byte_packets" },
5174 { "rx_256_to_511_byte_packets" },
5175 { "rx_512_to_1023_byte_packets" },
5176 { "rx_1024_to_1522_byte_packets" },
5177 { "rx_1523_to_9022_byte_packets" },
5178 { "tx_64_byte_packets" },
5179 { "tx_65_to_127_byte_packets" },
5180 { "tx_128_to_255_byte_packets" },
5181 { "tx_256_to_511_byte_packets" },
5182 { "tx_512_to_1023_byte_packets" },
5183 { "tx_1024_to_1522_byte_packets" },
5184 { "tx_1523_to_9022_byte_packets" },
5185 { "rx_xon_frames" },
5186 { "rx_xoff_frames" },
5187 { "tx_xon_frames" },
5188 { "tx_xoff_frames" },
5189 { "rx_mac_ctrl_frames" },
5190 { "rx_filtered_packets" },
5192 { "rx_fw_discards" },
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

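/* Self-test table.  Example usage, assuming the standard ethtool
 * userspace tool:
 *
 *   ethtool -t eth0 online    # nvram, interrupt and link tests only
 *   ethtool -t eth0 offline   # also register, memory and loopback
 *                             # tests; the NIC is stopped and reset
 */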
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		} else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
		       sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
		       sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}

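/* Counters are read directly out of the DMA'ed statistics block.  The
 * hardware keeps 64-bit counters as two consecutive 32-bit words, high
 * word first, so for a counter at word offset n the value is assembled
 * as:
 *
 *   buf[i] = ((u64) hw_stats[n] << 32) + hw_stats[n + 1];
 */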
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				   bnx2_stats_offset_arr[i])) << 32) +
			 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}

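/* Identify the adapter by blinking its LED: "data" seconds of alternating
 * all-on/all-off, using the EMAC LED override bits.  Example usage,
 * assuming the standard ethtool userspace tool:
 *
 *   ethtool -p eth0 5
 */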
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		} else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2_set_tso,
#endif
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

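/* Standard MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG), e.g. for the
 * mii-tool utility.  PHY reads and writes are serialized with the rest of
 * the driver through bp->phy_lock.
 */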
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}

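/* Netpoll hook (used by netconsole, for example): with the device's IRQ
 * masked, call the interrupt handler directly so packets can be processed
 * in contexts where normal interrupt delivery is unavailable.
 */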
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	if (bp->pcix_cap == 0) {
		dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(17);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	} else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it
	 * using pci_msi_quirk.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev is zeroed in alloc_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
	       "IRQ %d, ",
	       dev->name,
	       bp->name,
	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
	       ((bp->flags & PCIX_FLAG) ? "-X" : ""),
	       ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
	       bp->bus_speed_mhz,
	       dev->base_addr,
	       bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

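/* PM suspend: quiesce the interface and pick a firmware reset code that
 * tells the bootcode whether wake-on-LAN should stay armed while the
 * device is powered down.
 */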
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}

static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);