/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>
#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.3"
#define DRV_MODULE_RELDATE	"January 8, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
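/* Worked example of the arithmetic above (a sketch, not from the original
 * source): with a 16-bit producer that has wrapped to 2 while the consumer
 * is still at 65534, the subtraction yields a huge unsigned value, the
 * 0xffff mask reduces it to 4 in-flight descriptors, and
 * tx_ring_size - 4 slots remain available.
 */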
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
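/* Indirect register access, as implemented above: writing the target
 * offset to BNX2_PCICFG_REG_WINDOW_ADDRESS selects which internal register
 * is visible through BNX2_PCICFG_REG_WINDOW, so registers that are not
 * directly mapped in the PCI BAR can still be read and written (these
 * helpers back the REG_RD_IND()/REG_WR_IND() calls used elsewhere here).
 */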
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			udelay(5);
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
}
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	return ret;
}
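/* Shape of the MDIO command word built in the two helpers above: bits
 * 21-25 carry the PHY address, bits 16-20 the register number, and the
 * low 16 bits the write data; START_BUSY kicks off the transaction and
 * clears once the EMAC has finished clocking it out on the MDIO bus.
 * Auto-polling is temporarily turned off around the access so the
 * hardware's own background polling does not race with it.
 */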
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
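/* intr_sem acts as a nesting count for the stop/start pair above: each
 * bnx2_netif_stop() bumps it and masks the interrupt, the ISRs bail out
 * while it is non-zero, and only the bnx2_netif_start() that brings it
 * back to zero re-enables polling and interrupts.
 */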
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
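/* Layout of the combined allocation above: the status block sits at
 * offset 0, padded out to a cache line, with the statistics block
 * immediately after it.  Both share a single DMA mapping, which is why
 * stats_blk_mapping is simply status_blk_mapping + status_blk_size.
 */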
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
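/* Summary of the Table 28B-3 resolution implemented above
 * (PAUSE = symmetric pause bit, ASYM = asymmetric pause bit):
 *   local PAUSE,      remote PAUSE          -> RX and TX pause
 *   local PAUSE+ASYM, remote ASYM only      -> RX pause only
 *   local ASYM only,  remote PAUSE+ASYM     -> TX pause only
 *   any other combination                   -> no pause
 */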
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
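/* Note on the (remote_adv >> 2) above: the 1000BASE-T link-partner
 * ability bits in MII_STAT1000 sit two bit positions higher than the
 * corresponding advertisement bits in MII_CTRL1000, so shifting the
 * status right by 2 lines the two masks up for the AND.
 */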
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					       BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
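/* The driver/firmware mailbox handshake above is sequence-numbered: the
 * low bits of msg_data carry fw_wr_seq, and the firmware echoes the same
 * sequence back in BNX2_FW_MB to acknowledge.  A stale sequence after the
 * polling loop therefore means the firmware never acked the message, in
 * which case the timeout is reported back through the same mailbox.
 */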
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
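/* Example of the split above: for MAC address 00:10:18:aa:bb:cc,
 * MATCH0 receives 0x00000010 (the two high-order bytes) and MATCH1
 * receives 0x18aabbcc (the remaining four bytes).
 */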
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
static void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;
		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	return rx_pkt;
}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt.  Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    ((sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE) != 0))
		return 1;

	return 0;
}
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((bp->status_blk->status_attn_bits &
	     STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	     STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
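/* Re-arm note (our reading of the sequence above, not a statement from the
 * original author): in MSI mode a single write acking the latest status
 * index is enough to re-enable the interrupt, while in INTx mode the index
 * is first written with MASK_INT still set and then written again without
 * it, so the line is only unmasked once the new index has been acked.
 */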
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
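/* Hash placement used in the multicast loop above (a worked example):
 * the low byte of the little-endian CRC of the MAC address selects one
 * of 256 filter bits; its top three bits (mask 0xe0) pick one of the
 * eight 32-bit hash registers and the low five bits (mask 0x1f) pick
 * the bit within that register.
 */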
#define FW_BUF_SIZE	0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "decompression.\n", bp->dev->name);
	return -ENOMEM;
}
static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
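/* gzip framing handled above: a member starts with a fixed 10-byte header
 * (magic 0x1f 0x8b, compression method, flags, mtime, xfl, os); when the
 * FNAME flag is set, a NUL-terminated original file name follows before
 * the raw deflate stream.  That stream carries no zlib wrapper, which is
 * why zlib_inflateInit2() is called with a negative window-bits value.
 */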
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc, j;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		u32 text_len;
		void *text;

		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
				 &text_len);
		if (rc)
			return rc;

		fw->text = text;
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, fw->data[j]);
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, fw->sbss[j]);
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	for (j = 0; j < (fw->bss_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, fw->bss[j]);
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		 (fw->rodata_addr - cpu_reg->mips_view_base);
	for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, fw->rodata[j]);
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}

init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
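/* The NetXtreme II datapath is driven by several on-chip RISC engines;
 * bnx2_init_cpus() above loads firmware into each of them in turn: the
 * two RV2P engines, then the RX, TX, TX patch-up, and completion
 * processors, plus the command processor on the 5709, all through the
 * same load_cpu_fw() path with per-CPU register descriptions.
 */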
2528 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2532 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2538 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2539 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2540 PCI_PM_CTRL_PME_STATUS);
2542 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2543 /* delay required during transition out of D3hot */
2546 val = REG_RD(bp, BNX2_EMAC_MODE);
2547 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2548 val &= ~BNX2_EMAC_MODE_MPKT;
2549 REG_WR(bp, BNX2_EMAC_MODE, val);
2551 val = REG_RD(bp, BNX2_RPM_CONFIG);
2552 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2553 REG_WR(bp, BNX2_RPM_CONFIG, val);
2564 autoneg = bp->autoneg;
2565 advertising = bp->advertising;
2567 bp->autoneg = AUTONEG_SPEED;
2568 bp->advertising = ADVERTISED_10baseT_Half |
2569 ADVERTISED_10baseT_Full |
2570 ADVERTISED_100baseT_Half |
2571 ADVERTISED_100baseT_Full |
2574 bnx2_setup_copper_phy(bp);
2576 bp->autoneg = autoneg;
2577 bp->advertising = advertising;
2579 bnx2_set_mac_addr(bp);
2581 val = REG_RD(bp, BNX2_EMAC_MODE);
2583 /* Enable port mode. */
2584 val &= ~BNX2_EMAC_MODE_PORT;
2585 val |= BNX2_EMAC_MODE_PORT_MII |
2586 BNX2_EMAC_MODE_MPKT_RCVD |
2587 BNX2_EMAC_MODE_ACPI_RCVD |
2588 BNX2_EMAC_MODE_MPKT;
2590 REG_WR(bp, BNX2_EMAC_MODE, val);
2592 /* Receive all multicast. */
2593 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2594 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2597 REG_WR(bp, BNX2_EMAC_RX_MODE,
2598 BNX2_EMAC_RX_MODE_SORT_MODE);
2600 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2601 BNX2_RPM_SORT_USER0_MC_EN;
2602 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2603 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2604 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2605 BNX2_RPM_SORT_USER0_ENA);
2607 /* Need to enable EMAC and RPM for WOL. */
2608 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2609 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2610 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2611 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2613 val = REG_RD(bp, BNX2_RPM_CONFIG);
2614 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2615 REG_WR(bp, BNX2_RPM_CONFIG, val);
2617 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2620 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2623 if (!(bp->flags & NO_WOL_FLAG))
2624 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2626 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2627 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2628 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2637 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2639 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2642 /* No more memory access after this point until the
2643 * device is brought back to D0.
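/*
 * NVRAM arbitration: the flash interface is shared with the on-chip
 * firmware.  The driver raises its request bit (ARB_REQ_SET2), polls
 * up to NVRAM_TIMEOUT_COUNT times for the matching grant (ARB_ARB2),
 * and releases symmetrically through ARB_REQ_CLR2.
 */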
2655 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2660 /* Request access to the flash interface. */
2661 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2662 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2663 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2664 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2670 if (j >= NVRAM_TIMEOUT_COUNT)
2677 bnx2_release_nvram_lock(struct bnx2 *bp)
2682 /* Relinquish nvram interface. */
2683 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2685 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2686 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2687 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2693 if (j >= NVRAM_TIMEOUT_COUNT)
2701 bnx2_enable_nvram_write(struct bnx2 *bp)
2705 val = REG_RD(bp, BNX2_MISC_CFG);
2706 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2708 if (!bp->flash_info->buffered) {
2711 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2712 REG_WR(bp, BNX2_NVM_COMMAND,
2713 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2715 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2718 val = REG_RD(bp, BNX2_NVM_COMMAND);
2719 if (val & BNX2_NVM_COMMAND_DONE)
2723 if (j >= NVRAM_TIMEOUT_COUNT)
2730 bnx2_disable_nvram_write(struct bnx2 *bp)
2734 val = REG_RD(bp, BNX2_MISC_CFG);
2735 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2740 bnx2_enable_nvram_access(struct bnx2 *bp)
2744 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2745 /* Enable both bits, even on read. */
2746 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2747 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2751 bnx2_disable_nvram_access(struct bnx2 *bp)
2755 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2756 /* Disable both bits, even after read. */
2757 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2758 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2759 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2763 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2768 if (bp->flash_info->buffered)
2769 /* Buffered flash, no erase needed */
2772 /* Build an erase command */
2773 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2774 BNX2_NVM_COMMAND_DOIT;
2776 /* Need to clear DONE bit separately. */
2777 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2779 /* Address of the NVRAM page to erase. */
2780 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2782 /* Issue an erase command. */
2783 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2785 /* Wait for completion. */
2786 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2791 val = REG_RD(bp, BNX2_NVM_COMMAND);
2792 if (val & BNX2_NVM_COMMAND_DONE)
2796 if (j >= NVRAM_TIMEOUT_COUNT)
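/*
 * Buffered flash parts are addressed as (page << page_bits) +
 * byte-within-page rather than by flat offset, so the dword helpers
 * below translate linear offsets first.  A rough worked example,
 * assuming a 264-byte page with 9 page bits (264 matching the
 * flash_buffer size used by bnx2_nvram_write below):
 *
 *	offset 1000 -> page 3, byte 208 -> (3 << 9) + 208 = 0x6d0
 */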
2803 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2808 /* Build the command word. */
2809 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2811 /* Translate the offset for buffered flash. */
2812 if (bp->flash_info->buffered) {
2813 offset = ((offset / bp->flash_info->page_size) <<
2814 bp->flash_info->page_bits) +
2815 (offset % bp->flash_info->page_size);
2818 /* Need to clear DONE bit separately. */
2819 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2821 /* Address of the NVRAM to read from. */
2822 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2824 /* Issue a read command. */
2825 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2827 /* Wait for completion. */
2828 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2833 val = REG_RD(bp, BNX2_NVM_COMMAND);
2834 if (val & BNX2_NVM_COMMAND_DONE) {
2835 val = REG_RD(bp, BNX2_NVM_READ);
2837 val = be32_to_cpu(val);
2838 memcpy(ret_val, &val, 4);
2842 if (j >= NVRAM_TIMEOUT_COUNT)
2850 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2855 /* Build the command word. */
2856 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2858 /* Translate the offset for buffered flash. */
2859 if (bp->flash_info->buffered) {
2860 offset = ((offset / bp->flash_info->page_size) <<
2861 bp->flash_info->page_bits) +
2862 (offset % bp->flash_info->page_size);
2865 /* Need to clear DONE bit separately. */
2866 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2868 memcpy(&val32, val, 4);
2869 val32 = cpu_to_be32(val32);
2871 /* Write the data. */
2872 REG_WR(bp, BNX2_NVM_WRITE, val32);
2874 /* Address of the NVRAM to write to. */
2875 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2877 /* Issue the write command. */
2878 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2880 /* Wait for completion. */
2881 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2884 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2887 if (j >= NVRAM_TIMEOUT_COUNT)
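/*
 * Flash detection: NVM_CFG1 either reflects an interface that the
 * bootcode already reconfigured (bit 0x40000000 set) or the raw
 * power-on strapping.  Either way the value is matched against
 * flash_table[] to select bp->flash_info, and in the strapped case
 * the interface is reprogrammed with the entry's config1-3/write1
 * values.
 */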
2894 bnx2_init_nvram(struct bnx2 *bp)
2897 int j, entry_count, rc;
2898 struct flash_spec *flash;
2900 /* Determine the selected interface. */
2901 val = REG_RD(bp, BNX2_NVM_CFG1);
2903 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2906 if (val & 0x40000000) {
2908 /* Flash interface has been reconfigured */
2909 for (j = 0, flash = &flash_table[0]; j < entry_count;
2911 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2912 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2913 bp->flash_info = flash;
2920 /* Not yet reconfigured */
2922 if (val & (1 << 23))
2923 mask = FLASH_BACKUP_STRAP_MASK;
2925 mask = FLASH_STRAP_MASK;
2927 for (j = 0, flash = &flash_table[0]; j < entry_count;
2930 if ((val & mask) == (flash->strapping & mask)) {
2931 bp->flash_info = flash;
2933 /* Request access to the flash interface. */
2934 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2937 /* Enable access to flash interface */
2938 bnx2_enable_nvram_access(bp);
2940 /* Reconfigure the flash interface */
2941 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2942 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2943 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2944 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2946 /* Disable access to flash interface */
2947 bnx2_disable_nvram_access(bp);
2948 bnx2_release_nvram_lock(bp);
2953 } /* if (val & 0x40000000) */
2955 if (j == entry_count) {
2956 bp->flash_info = NULL;
2957 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2961 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2962 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2964 bp->flash_size = val;
2966 bp->flash_size = bp->flash_info->total_size;
2972 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2976 u32 cmd_flags, offset32, len32, extra;
2981 /* Request access to the flash interface. */
2982 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2985 /* Enable access to flash interface */
2986 bnx2_enable_nvram_access(bp);
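/*
 * The NVRAM state machine transfers whole dwords, so a read that
 * starts or ends off a 4-byte boundary is satisfied by fetching the
 * surrounding dword into the scratch buffer and copying out only the
 * requested bytes: pre_len at the head, (4 - extra) at the tail.
 */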
2999 pre_len = 4 - (offset & 3);
3001 if (pre_len >= len32) {
3003 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3004 BNX2_NVM_COMMAND_LAST;
3007 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3010 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3015 memcpy(ret_buf, buf + (offset & 3), pre_len);
3022 extra = 4 - (len32 & 3);
3023 len32 = (len32 + 4) & ~3;
3030 cmd_flags = BNX2_NVM_COMMAND_LAST;
3032 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3033 BNX2_NVM_COMMAND_LAST;
3035 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3037 memcpy(ret_buf, buf, 4 - extra);
3039 else if (len32 > 0) {
3042 /* Read the first word. */
3046 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3048 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3050 /* Advance to the next dword. */
3055 while (len32 > 4 && rc == 0) {
3056 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3058 /* Advance to the next dword. */
3067 cmd_flags = BNX2_NVM_COMMAND_LAST;
3068 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3070 memcpy(ret_buf, buf, 4 - extra);
3073 /* Disable access to flash interface */
3074 bnx2_disable_nvram_access(bp);
3076 bnx2_release_nvram_lock(bp);
3082 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3085 u32 written, offset32, len32;
3086 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3088 int align_start, align_end;
3093 align_start = align_end = 0;
3095 if ((align_start = (offset32 & 3))) {
3097 len32 += (4 - align_start);
3098 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3103 if ((len32 > 4) || !align_start) {
3104 align_end = 4 - (len32 & 3);
3106 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3113 if (align_start || align_end) {
3114 align_buf = kmalloc(len32, GFP_KERNEL);
3115 if (align_buf == NULL)
3118 memcpy(align_buf, start, 4);
3121 memcpy(align_buf + len32 - 4, end, 4);
3123 memcpy(align_buf + align_start, data_buf, buf_size);
3127 if (bp->flash_info->buffered == 0) {
3128 flash_buffer = kmalloc(264, GFP_KERNEL);
3129 if (flash_buffer == NULL) {
3131 goto nvram_write_end;
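/*
 * Main write loop.  Non-buffered parts can only be erased one page at
 * a time, so every page the request touches is handled
 * read-modify-write: read the whole page into flash_buffer, erase it,
 * then rewrite the untouched head (page_start..data_start), the new
 * data (data_start..data_end) and the untouched tail
 * (data_end..page_end).  Buffered parts skip the read and erase and
 * simply program data_start..data_end.
 */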
3136 while ((written < len32) && (rc == 0)) {
3137 u32 page_start, page_end, data_start, data_end;
3138 u32 addr, cmd_flags;
3141 /* Find the page_start addr */
3142 page_start = offset32 + written;
3143 page_start -= (page_start % bp->flash_info->page_size);
3144 /* Find the page_end addr */
3145 page_end = page_start + bp->flash_info->page_size;
3146 /* Find the data_start addr */
3147 data_start = (written == 0) ? offset32 : page_start;
3148 /* Find the data_end addr */
3149 data_end = (page_end > offset32 + len32) ?
3150 (offset32 + len32) : page_end;
3152 /* Request access to the flash interface. */
3153 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3154 goto nvram_write_end;
3156 /* Enable access to flash interface */
3157 bnx2_enable_nvram_access(bp);
3159 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3160 if (bp->flash_info->buffered == 0) {
3163 /* Read the whole page into the buffer
3164 * (non-buffered flash only). */
3165 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3166 if (j == (bp->flash_info->page_size - 4)) {
3167 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3169 rc = bnx2_nvram_read_dword(bp,
3175 goto nvram_write_end;
3181 /* Enable writes to flash interface (unlock write-protect) */
3182 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3183 goto nvram_write_end;
3185 /* Erase the page */
3186 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3187 goto nvram_write_end;
3189 /* Re-enable write access for the actual write. */
3190 bnx2_enable_nvram_write(bp);
3192 /* Loop to write back the buffer data from page_start to
3195 if (bp->flash_info->buffered == 0) {
3196 for (addr = page_start; addr < data_start;
3197 addr += 4, i += 4) {
3199 rc = bnx2_nvram_write_dword(bp, addr,
3200 &flash_buffer[i], cmd_flags);
3203 goto nvram_write_end;
3209 /* Loop to write the new data from data_start to data_end */
3210 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3211 if ((addr == page_end - 4) ||
3212 ((bp->flash_info->buffered) &&
3213 (addr == data_end - 4))) {
3215 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3217 rc = bnx2_nvram_write_dword(bp, addr, buf,
3221 goto nvram_write_end;
3227 /* Loop to write back the buffer data from data_end
3229 if (bp->flash_info->buffered == 0) {
3230 for (addr = data_end; addr < page_end;
3231 addr += 4, i += 4) {
3233 if (addr == page_end-4) {
3234 cmd_flags = BNX2_NVM_COMMAND_LAST;
3236 rc = bnx2_nvram_write_dword(bp, addr,
3237 &flash_buffer[i], cmd_flags);
3240 goto nvram_write_end;
3246 /* Disable writes to flash interface (lock write-protect) */
3247 bnx2_disable_nvram_write(bp);
3249 /* Disable access to flash interface */
3250 bnx2_disable_nvram_access(bp);
3251 bnx2_release_nvram_lock(bp);
3253 /* Account for the bytes written in this page. */
3254 written += data_end - data_start;
3258 kfree(flash_buffer);
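/*
 * Chip reset is a handshake with the bootcode: quiesce DMA, ask for
 * permission (WAIT0), deposit a reset signature in shared memory so
 * this is treated as a soft reset, pulse the core reset (MISC_COMMAND
 * on the 5709, PCICFG_MISC_CONFIG on earlier chips), and then wait
 * for the firmware to finish re-initializing (WAIT1).
 */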
3264 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3269 /* Wait for the current PCI transaction to complete before
3270 * issuing a reset. */
3271 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3272 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3273 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3274 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3275 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3276 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3279 /* Wait for the firmware to tell us it is ok to issue a reset. */
3280 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3282 /* Deposit a driver reset signature so the firmware knows that
3283 * this is a soft reset. */
3284 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3285 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3287 /* Do a dummy read to force the chip to complete all current
3288 * transactions before we issue a reset. */
3289 val = REG_RD(bp, BNX2_MISC_ID);
3291 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3292 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3293 REG_RD(bp, BNX2_MISC_COMMAND);
3296 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3297 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3299 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3302 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3303 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3304 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3307 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3309 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3310 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3311 current->state = TASK_UNINTERRUPTIBLE;
3312 schedule_timeout(HZ / 50);
3315 /* Reset takes approximately 30 usec */
3316 for (i = 0; i < 10; i++) {
3317 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3318 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3319 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3324 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3325 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3326 printk(KERN_ERR PFX "Chip reset did not complete\n");
3331 /* Make sure byte swapping is properly configured. */
3332 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3333 if (val != 0x01020304) {
3334 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3338 /* Wait for the firmware to finish its initialization. */
3339 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3343 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3344 /* Adjust the voltage regulator two steps lower. The default
3345 * value of this register is 0x0000000e. */
3346 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3348 /* Remove bad rbuf memory from the free pool. */
3349 rc = bnx2_alloc_bad_rbuf(bp);
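/*
 * Basic chip initialization.  The DMA_CONFIG swap bits below present
 * host data in the byte/word order the internal engines expect, the
 * shifted fields select the DMA read/write channel counts, and
 * PING_PONG_DMA is a workaround applied only to non-PCI-X 5706 parts
 * after A0.
 */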
3356 bnx2_init_chip(struct bnx2 *bp)
3361 /* Make sure the interrupt is not active. */
3362 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3364 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3365 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3367 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3369 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3370 DMA_READ_CHANS << 12 |
3371 DMA_WRITE_CHANS << 16;
3373 val |= (0x2 << 20) | (1 << 11);
3375 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3378 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3379 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3380 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3382 REG_WR(bp, BNX2_DMA_CONFIG, val);
3384 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3385 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3386 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3387 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3390 if (bp->flags & PCIX_FLAG) {
3393 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3395 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3396 val16 & ~PCI_X_CMD_ERO);
3399 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3400 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3401 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3402 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3404 /* Initialize context mapping and zero out the quick contexts. The
3405 * context block must have already been enabled. */
3406 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3407 bnx2_init_5709_context(bp);
3409 bnx2_init_context(bp);
3411 if ((rc = bnx2_init_cpus(bp)) != 0)
3414 bnx2_init_nvram(bp);
3416 bnx2_set_mac_addr(bp);
3418 val = REG_RD(bp, BNX2_MQ_CONFIG);
3419 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3420 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3421 REG_WR(bp, BNX2_MQ_CONFIG, val);
3423 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3424 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3425 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3427 val = (BCM_PAGE_BITS - 8) << 24;
3428 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3430 /* Configure page size. */
3431 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3432 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3433 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3434 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3436 val = bp->mac_addr[0] +
3437 (bp->mac_addr[1] << 8) +
3438 (bp->mac_addr[2] << 16) +
3440 (bp->mac_addr[4] << 8) +
3441 (bp->mac_addr[5] << 16);
3442 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3444 /* Program the MTU. Also include 4 bytes for CRC32. */
3445 val = bp->dev->mtu + ETH_HLEN + 4;
3446 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3447 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3448 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3450 bp->last_status_idx = 0;
3451 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3453 /* Set up how to generate a link change interrupt. */
3454 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3456 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3457 (u64) bp->status_blk_mapping & 0xffffffff);
3458 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3460 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3461 (u64) bp->stats_blk_mapping & 0xffffffff);
3462 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3463 (u64) bp->stats_blk_mapping >> 32);
3465 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3466 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3468 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3469 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3471 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3472 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3474 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3476 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3478 REG_WR(bp, BNX2_HC_COM_TICKS,
3479 (bp->com_ticks_int << 16) | bp->com_ticks);
3481 REG_WR(bp, BNX2_HC_CMD_TICKS,
3482 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3484 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3485 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3487 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3488 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3490 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3491 BNX2_HC_CONFIG_TX_TMR_MODE |
3492 BNX2_HC_CONFIG_COLLECT_STATS);
3495 /* Clear internal stats counters. */
3496 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3498 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3500 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3501 BNX2_PORT_FEATURE_ASF_ENABLED)
3502 bp->flags |= ASF_ENABLE_FLAG;
3504 /* Initialize the receive filter. */
3505 bnx2_set_rx_mode(bp->dev);
3507 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3510 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3511 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3515 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3521 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3523 u32 val, offset0, offset1, offset2, offset3;
3525 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3526 offset0 = BNX2_L2CTX_TYPE_XI;
3527 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3528 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3529 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3531 offset0 = BNX2_L2CTX_TYPE;
3532 offset1 = BNX2_L2CTX_CMD_TYPE;
3533 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3534 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3536 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3537 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3539 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3540 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3542 val = (u64) bp->tx_desc_mapping >> 32;
3543 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3545 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3546 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3550 bnx2_init_tx_ring(struct bnx2 *bp)
3555 bp->tx_wake_thresh = bp->tx_ring_size / 2;
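/*
 * The descriptor at index MAX_TX_DESC_CNT is not a normal BD: its
 * host address points back at the base of the ring, turning the TX
 * page into a circular chain.
 */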
3557 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3559 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3560 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3565 bp->tx_prod_bseq = 0;
3568 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3569 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3571 bnx2_init_tx_context(bp, cid);
3575 bnx2_init_rx_ring(struct bnx2 *bp)
3579 u16 prod, ring_prod;
3582 /* 8 for CRC and VLAN */
3583 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3585 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3587 ring_prod = prod = bp->rx_prod = 0;
3590 bp->rx_prod_bseq = 0;
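/*
 * The RX ring may span several pages.  The final descriptor of each
 * page chains to the next page, and the last page chains back to
 * page 0, giving one logical ring across bp->rx_max_ring pages.
 */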
3592 for (i = 0; i < bp->rx_max_ring; i++) {
3595 rxbd = &bp->rx_desc_ring[i][0];
3596 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3597 rxbd->rx_bd_len = bp->rx_buf_use_size;
3598 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3600 if (i == (bp->rx_max_ring - 1))
3604 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3605 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3609 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3610 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3612 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3614 val = (u64) bp->rx_desc_mapping[0] >> 32;
3615 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3617 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3618 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3620 for (i = 0; i < bp->rx_ring_size; i++) {
3621 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3624 prod = NEXT_RX_BD(prod);
3625 ring_prod = RX_RING_IDX(prod);
3629 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3631 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3635 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3639 bp->rx_ring_size = size;
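/*
 * Each ring page holds MAX_RX_DESC_CNT usable BDs; the page count is
 * rounded up to the next power of two, apparently so the producer
 * index can wrap with a simple AND mask (see rx_max_ring_idx below).
 */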
3641 while (size > MAX_RX_DESC_CNT) {
3642 size -= MAX_RX_DESC_CNT;
3645 /* round to next power of 2 */
3647 while ((max & num_rings) == 0)
3650 if (num_rings != max)
3653 bp->rx_max_ring = max;
3654 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3658 bnx2_free_tx_skbs(struct bnx2 *bp)
3662 if (bp->tx_buf_ring == NULL)
3665 for (i = 0; i < TX_DESC_CNT; ) {
3666 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3667 struct sk_buff *skb = tx_buf->skb;
3675 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3676 skb_headlen(skb), PCI_DMA_TODEVICE);
3680 last = skb_shinfo(skb)->nr_frags;
3681 for (j = 0; j < last; j++) {
3682 tx_buf = &bp->tx_buf_ring[i + j + 1];
3683 pci_unmap_page(bp->pdev,
3684 pci_unmap_addr(tx_buf, mapping),
3685 skb_shinfo(skb)->frags[j].size,
3695 bnx2_free_rx_skbs(struct bnx2 *bp)
3699 if (bp->rx_buf_ring == NULL)
3702 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3703 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3704 struct sk_buff *skb = rx_buf->skb;
3709 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3710 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3719 bnx2_free_skbs(struct bnx2 *bp)
3721 bnx2_free_tx_skbs(bp);
3722 bnx2_free_rx_skbs(bp);
3726 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3730 rc = bnx2_reset_chip(bp, reset_code);
3735 if ((rc = bnx2_init_chip(bp)) != 0)
3738 bnx2_init_tx_ring(bp);
3739 bnx2_init_rx_ring(bp);
3744 bnx2_init_nic(struct bnx2 *bp)
3748 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3751 spin_lock_bh(&bp->phy_lock);
3753 spin_unlock_bh(&bp->phy_lock);
3759 bnx2_test_registers(struct bnx2 *bp)
3763 static const struct {
3769 { 0x006c, 0, 0x00000000, 0x0000003f },
3770 { 0x0090, 0, 0xffffffff, 0x00000000 },
3771 { 0x0094, 0, 0x00000000, 0x00000000 },
3773 { 0x0404, 0, 0x00003f00, 0x00000000 },
3774 { 0x0418, 0, 0x00000000, 0xffffffff },
3775 { 0x041c, 0, 0x00000000, 0xffffffff },
3776 { 0x0420, 0, 0x00000000, 0x80ffffff },
3777 { 0x0424, 0, 0x00000000, 0x00000000 },
3778 { 0x0428, 0, 0x00000000, 0x00000001 },
3779 { 0x0450, 0, 0x00000000, 0x0000ffff },
3780 { 0x0454, 0, 0x00000000, 0xffffffff },
3781 { 0x0458, 0, 0x00000000, 0xffffffff },
3783 { 0x0808, 0, 0x00000000, 0xffffffff },
3784 { 0x0854, 0, 0x00000000, 0xffffffff },
3785 { 0x0868, 0, 0x00000000, 0x77777777 },
3786 { 0x086c, 0, 0x00000000, 0x77777777 },
3787 { 0x0870, 0, 0x00000000, 0x77777777 },
3788 { 0x0874, 0, 0x00000000, 0x77777777 },
3790 { 0x0c00, 0, 0x00000000, 0x00000001 },
3791 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3792 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3794 { 0x1000, 0, 0x00000000, 0x00000001 },
3795 { 0x1004, 0, 0x00000000, 0x000f0001 },
3797 { 0x1408, 0, 0x01c00800, 0x00000000 },
3798 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3799 { 0x14a8, 0, 0x00000000, 0x000001ff },
3800 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3801 { 0x14b0, 0, 0x00000002, 0x00000001 },
3802 { 0x14b8, 0, 0x00000000, 0x00000000 },
3803 { 0x14c0, 0, 0x00000000, 0x00000009 },
3804 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3805 { 0x14cc, 0, 0x00000000, 0x00000001 },
3806 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3808 { 0x1800, 0, 0x00000000, 0x00000001 },
3809 { 0x1804, 0, 0x00000000, 0x00000003 },
3811 { 0x2800, 0, 0x00000000, 0x00000001 },
3812 { 0x2804, 0, 0x00000000, 0x00003f01 },
3813 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3814 { 0x2810, 0, 0xffff0000, 0x00000000 },
3815 { 0x2814, 0, 0xffff0000, 0x00000000 },
3816 { 0x2818, 0, 0xffff0000, 0x00000000 },
3817 { 0x281c, 0, 0xffff0000, 0x00000000 },
3818 { 0x2834, 0, 0xffffffff, 0x00000000 },
3819 { 0x2840, 0, 0x00000000, 0xffffffff },
3820 { 0x2844, 0, 0x00000000, 0xffffffff },
3821 { 0x2848, 0, 0xffffffff, 0x00000000 },
3822 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3824 { 0x2c00, 0, 0x00000000, 0x00000011 },
3825 { 0x2c04, 0, 0x00000000, 0x00030007 },
3827 { 0x3c00, 0, 0x00000000, 0x00000001 },
3828 { 0x3c04, 0, 0x00000000, 0x00070000 },
3829 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3830 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3831 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3832 { 0x3c14, 0, 0x00000000, 0xffffffff },
3833 { 0x3c18, 0, 0x00000000, 0xffffffff },
3834 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3835 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3837 { 0x5004, 0, 0x00000000, 0x0000007f },
3838 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3839 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3841 { 0x5c00, 0, 0x00000000, 0x00000001 },
3842 { 0x5c04, 0, 0x00000000, 0x0003000f },
3843 { 0x5c08, 0, 0x00000003, 0x00000000 },
3844 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3845 { 0x5c10, 0, 0x00000000, 0xffffffff },
3846 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3847 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3848 { 0x5c88, 0, 0x00000000, 0x00077373 },
3849 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3851 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3852 { 0x680c, 0, 0xffffffff, 0x00000000 },
3853 { 0x6810, 0, 0xffffffff, 0x00000000 },
3854 { 0x6814, 0, 0xffffffff, 0x00000000 },
3855 { 0x6818, 0, 0xffffffff, 0x00000000 },
3856 { 0x681c, 0, 0xffffffff, 0x00000000 },
3857 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3858 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3859 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3860 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3861 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3862 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3863 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3864 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3865 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3866 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3867 { 0x684c, 0, 0xffffffff, 0x00000000 },
3868 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3869 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3870 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3871 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3872 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3873 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3875 { 0xffff, 0, 0x00000000, 0x00000000 },
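/*
 * For each entry: write zero and confirm the read/write bits read
 * back as zero, then write all-ones and confirm they read back set,
 * verifying after each write that the read-only bits kept their
 * saved value; the original register contents are restored
 * afterwards.  The 0xffff offset terminates the table.
 */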
3879 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3880 u32 offset, rw_mask, ro_mask, save_val, val;
3882 offset = (u32) reg_tbl[i].offset;
3883 rw_mask = reg_tbl[i].rw_mask;
3884 ro_mask = reg_tbl[i].ro_mask;
3886 save_val = readl(bp->regview + offset);
3888 writel(0, bp->regview + offset);
3890 val = readl(bp->regview + offset);
3891 if ((val & rw_mask) != 0) {
3895 if ((val & ro_mask) != (save_val & ro_mask)) {
3899 writel(0xffffffff, bp->regview + offset);
3901 val = readl(bp->regview + offset);
3902 if ((val & rw_mask) != rw_mask) {
3906 if ((val & ro_mask) != (save_val & ro_mask)) {
3910 writel(save_val, bp->regview + offset);
3914 writel(save_val, bp->regview + offset);
3922 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3924 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3925 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
3928 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3931 for (offset = 0; offset < size; offset += 4) {
3933 REG_WR_IND(bp, start + offset, test_pattern[i]);
3935 if (REG_RD_IND(bp, start + offset) !=
3945 bnx2_test_memory(struct bnx2 *bp)
3949 static const struct {
3953 { 0x60000, 0x4000 },
3954 { 0xa0000, 0x3000 },
3955 { 0xe0000, 0x4000 },
3956 { 0x120000, 0x4000 },
3957 { 0x1a0000, 0x4000 },
3958 { 0x160000, 0x4000 },
3962 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3963 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3964 mem_tbl[i].len)) != 0) {
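/*
 * Loopback self-test: with the MAC (or PHY) looped back onto itself,
 * a single frame addressed to our own MAC is queued on the TX ring,
 * the host coalescing block is kicked (COAL_NOW_WO_INT) so
 * completions post without interrupts, and the frame must then show
 * up on the RX ring with a clean l2_fhdr status and an unmodified
 * payload.
 */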
3972 #define BNX2_MAC_LOOPBACK 0
3973 #define BNX2_PHY_LOOPBACK 1
3976 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3978 unsigned int pkt_size, num_pkts, i;
3979 struct sk_buff *skb, *rx_skb;
3980 unsigned char *packet;
3981 u16 rx_start_idx, rx_idx;
3984 struct sw_bd *rx_buf;
3985 struct l2_fhdr *rx_hdr;
3988 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3989 bp->loopback = MAC_LOOPBACK;
3990 bnx2_set_mac_loopback(bp);
3992 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3993 bp->loopback = PHY_LOOPBACK;
3994 bnx2_set_phy_loopback(bp);
4000 skb = netdev_alloc_skb(bp->dev, pkt_size);
4003 packet = skb_put(skb, pkt_size);
4004 memcpy(packet, bp->dev->dev_addr, 6);
4005 memset(packet + 6, 0x0, 8);
4006 for (i = 14; i < pkt_size; i++)
4007 packet[i] = (unsigned char) (i & 0xff);
4009 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4012 REG_WR(bp, BNX2_HC_COMMAND,
4013 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4015 REG_RD(bp, BNX2_HC_COMMAND);
4018 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4022 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4024 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4025 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4026 txbd->tx_bd_mss_nbytes = pkt_size;
4027 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4030 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4031 bp->tx_prod_bseq += pkt_size;
4033 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4034 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4038 REG_WR(bp, BNX2_HC_COMMAND,
4039 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4041 REG_RD(bp, BNX2_HC_COMMAND);
4045 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4048 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4049 goto loopback_test_done;
4052 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4053 if (rx_idx != rx_start_idx + num_pkts) {
4054 goto loopback_test_done;
4057 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4058 rx_skb = rx_buf->skb;
4060 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4061 skb_reserve(rx_skb, bp->rx_offset);
4063 pci_dma_sync_single_for_cpu(bp->pdev,
4064 pci_unmap_addr(rx_buf, mapping),
4065 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4067 if (rx_hdr->l2_fhdr_status &
4068 (L2_FHDR_ERRORS_BAD_CRC |
4069 L2_FHDR_ERRORS_PHY_DECODE |
4070 L2_FHDR_ERRORS_ALIGNMENT |
4071 L2_FHDR_ERRORS_TOO_SHORT |
4072 L2_FHDR_ERRORS_GIANT_FRAME)) {
4074 goto loopback_test_done;
4077 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4078 goto loopback_test_done;
4081 for (i = 14; i < pkt_size; i++) {
4082 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4083 goto loopback_test_done;
4094 #define BNX2_MAC_LOOPBACK_FAILED 1
4095 #define BNX2_PHY_LOOPBACK_FAILED 2
4096 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4097 BNX2_PHY_LOOPBACK_FAILED)
4100 bnx2_test_loopback(struct bnx2 *bp)
4104 if (!netif_running(bp->dev))
4105 return BNX2_LOOPBACK_FAILED;
4107 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4108 spin_lock_bh(&bp->phy_lock);
4110 spin_unlock_bh(&bp->phy_lock);
4111 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4112 rc |= BNX2_MAC_LOOPBACK_FAILED;
4113 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4114 rc |= BNX2_PHY_LOOPBACK_FAILED;
4118 #define NVRAM_SIZE 0x200
4119 #define CRC32_RESIDUAL 0xdebb20e3
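/*
 * A region that ends with its stored CRC32 yields the well-known
 * constant residual 0xdebb20e3 when the CRC is run over data plus
 * checksum, so each 0x100-byte block below is validated without
 * parsing its layout.
 */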
4122 bnx2_test_nvram(struct bnx2 *bp)
4124 u32 buf[NVRAM_SIZE / 4];
4125 u8 *data = (u8 *) buf;
4129 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4130 goto test_nvram_done;
4132 magic = be32_to_cpu(buf[0]);
4133 if (magic != 0x669955aa) {
4135 goto test_nvram_done;
4138 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4139 goto test_nvram_done;
4141 csum = ether_crc_le(0x100, data);
4142 if (csum != CRC32_RESIDUAL) {
4144 goto test_nvram_done;
4147 csum = ether_crc_le(0x100, data + 0x100);
4148 if (csum != CRC32_RESIDUAL) {
4157 bnx2_test_link(struct bnx2 *bp)
4161 spin_lock_bh(&bp->phy_lock);
4162 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4163 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4164 spin_unlock_bh(&bp->phy_lock);
4166 if (bmsr & BMSR_LSTATUS) {
4173 bnx2_test_intr(struct bnx2 *bp)
4178 if (!netif_running(bp->dev))
4181 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4183 /* This register is not touched during run-time. */
4184 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4185 REG_RD(bp, BNX2_HC_COMMAND);
4187 for (i = 0; i < 10; i++) {
4188 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4194 msleep_interruptible(10);
4203 bnx2_5706_serdes_timer(struct bnx2 *bp)
4205 spin_lock(&bp->phy_lock);
4206 if (bp->serdes_an_pending)
4207 bp->serdes_an_pending--;
4208 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4211 bp->current_interval = bp->timer_interval;
4213 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4215 if (bmcr & BMCR_ANENABLE) {
4218 bnx2_write_phy(bp, 0x1c, 0x7c00);
4219 bnx2_read_phy(bp, 0x1c, &phy1);
4221 bnx2_write_phy(bp, 0x17, 0x0f01);
4222 bnx2_read_phy(bp, 0x15, &phy2);
4223 bnx2_write_phy(bp, 0x17, 0x0f01);
4224 bnx2_read_phy(bp, 0x15, &phy2);
4226 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4227 !(phy2 & 0x20)) { /* no CONFIG */
4229 bmcr &= ~BMCR_ANENABLE;
4230 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4231 bnx2_write_phy(bp, MII_BMCR, bmcr);
4232 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4236 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4237 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4240 bnx2_write_phy(bp, 0x17, 0x0f01);
4241 bnx2_read_phy(bp, 0x15, &phy2);
4245 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4246 bmcr |= BMCR_ANENABLE;
4247 bnx2_write_phy(bp, MII_BMCR, bmcr);
4249 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4252 bp->current_interval = bp->timer_interval;
4254 spin_unlock(&bp->phy_lock);
4258 bnx2_5708_serdes_timer(struct bnx2 *bp)
4260 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4261 bp->serdes_an_pending = 0;
4265 spin_lock(&bp->phy_lock);
4266 if (bp->serdes_an_pending)
4267 bp->serdes_an_pending--;
4268 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4271 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4273 if (bmcr & BMCR_ANENABLE) {
4274 bmcr &= ~BMCR_ANENABLE;
4275 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4276 bnx2_write_phy(bp, MII_BMCR, bmcr);
4277 bp->current_interval = SERDES_FORCED_TIMEOUT;
4279 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4280 bmcr |= BMCR_ANENABLE;
4281 bnx2_write_phy(bp, MII_BMCR, bmcr);
4282 bp->serdes_an_pending = 2;
4283 bp->current_interval = bp->timer_interval;
4287 bp->current_interval = bp->timer_interval;
4289 spin_unlock(&bp->phy_lock);
4293 bnx2_timer(unsigned long data)
4295 struct bnx2 *bp = (struct bnx2 *) data;
4298 if (!netif_running(bp->dev))
4301 if (atomic_read(&bp->intr_sem) != 0)
4302 goto bnx2_restart_timer;
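/*
 * Heartbeat: the pulse sequence number written to BNX2_DRV_PULSE_MB
 * lets the bootcode/management firmware see that the driver is still
 * alive; the firmware RX drop count is folded into the stats block on
 * the same tick.
 */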
4304 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4305 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4307 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4309 if (bp->phy_flags & PHY_SERDES_FLAG) {
4310 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4311 bnx2_5706_serdes_timer(bp);
4312 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4313 bnx2_5708_serdes_timer(bp);
4317 mod_timer(&bp->timer, jiffies + bp->current_interval);
4320 /* Called with rtnl_lock */
4322 bnx2_open(struct net_device *dev)
4324 struct bnx2 *bp = netdev_priv(dev);
4327 bnx2_set_power_state(bp, PCI_D0);
4328 bnx2_disable_int(bp);
4330 rc = bnx2_alloc_mem(bp);
4334 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4335 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4338 if (pci_enable_msi(bp->pdev) == 0) {
4339 bp->flags |= USING_MSI_FLAG;
4340 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4344 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4345 IRQF_SHARED, dev->name, dev);
4349 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4357 rc = bnx2_init_nic(bp);
4360 free_irq(bp->pdev->irq, dev);
4361 if (bp->flags & USING_MSI_FLAG) {
4362 pci_disable_msi(bp->pdev);
4363 bp->flags &= ~USING_MSI_FLAG;
4370 mod_timer(&bp->timer, jiffies + bp->current_interval);
4372 atomic_set(&bp->intr_sem, 0);
4374 bnx2_enable_int(bp);
4376 if (bp->flags & USING_MSI_FLAG) {
4377 /* Test MSI to make sure it is working
4378 * If MSI test fails, go back to INTx mode
4380 if (bnx2_test_intr(bp) != 0) {
4381 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4382 " using MSI, switching to INTx mode. Please"
4383 " report this failure to the PCI maintainer"
4384 " and include system chipset information.\n",
4387 bnx2_disable_int(bp);
4388 free_irq(bp->pdev->irq, dev);
4389 pci_disable_msi(bp->pdev);
4390 bp->flags &= ~USING_MSI_FLAG;
4392 rc = bnx2_init_nic(bp);
4395 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4396 IRQF_SHARED, dev->name, dev);
4401 del_timer_sync(&bp->timer);
4404 bnx2_enable_int(bp);
4407 if (bp->flags & USING_MSI_FLAG) {
4408 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4411 netif_start_queue(dev);
4417 bnx2_reset_task(struct work_struct *work)
4419 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4421 if (!netif_running(bp->dev))
4424 bp->in_reset_task = 1;
4425 bnx2_netif_stop(bp);
4429 atomic_set(&bp->intr_sem, 1);
4430 bnx2_netif_start(bp);
4431 bp->in_reset_task = 0;
4435 bnx2_tx_timeout(struct net_device *dev)
4437 struct bnx2 *bp = netdev_priv(dev);
4439 /* This allows the netif to be shut down gracefully before resetting. */
4440 schedule_work(&bp->reset_task);
4444 /* Called with rtnl_lock */
4446 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4448 struct bnx2 *bp = netdev_priv(dev);
4450 bnx2_netif_stop(bp);
4453 bnx2_set_rx_mode(dev);
4455 bnx2_netif_start(bp);
4458 /* Called with rtnl_lock */
4460 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4462 struct bnx2 *bp = netdev_priv(dev);
4464 bnx2_netif_stop(bp);
4467 bp->vlgrp->vlan_devices[vid] = NULL;
4468 bnx2_set_rx_mode(dev);
4470 bnx2_netif_start(bp);
4474 /* Called with netif_tx_lock.
4475 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4476 * netif_wake_queue().
4479 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4481 struct bnx2 *bp = netdev_priv(dev);
4484 struct sw_bd *tx_buf;
4485 u32 len, vlan_tag_flags, last_frag, mss;
4486 u16 prod, ring_prod;
4489 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4490 netif_stop_queue(dev);
4491 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4494 return NETDEV_TX_BUSY;
4496 len = skb_headlen(skb);
4498 ring_prod = TX_RING_IDX(prod);
4501 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4502 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4505 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4507 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
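/*
 * LSO setup: the hardware replays the template header for every
 * segment, so it is pre-cooked here -- the IP checksum is zeroed and
 * tot_len set to the per-segment size, the TCP checksum is seeded
 * with only the pseudo-header, and IP/TCP option lengths are folded
 * into vlan_tag_flags so the chip knows the true header length.
 */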
4510 if ((mss = skb_shinfo(skb)->gso_size) &&
4511 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4512 u32 tcp_opt_len, ip_tcp_len;
4514 if (skb_header_cloned(skb) &&
4515 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4517 return NETDEV_TX_OK;
4520 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4521 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4524 if (skb->h.th->doff > 5) {
4525 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4527 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4529 skb->nh.iph->check = 0;
4530 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4532 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4536 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4537 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4538 (tcp_opt_len >> 2)) << 8;
4547 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4549 tx_buf = &bp->tx_buf_ring[ring_prod];
4551 pci_unmap_addr_set(tx_buf, mapping, mapping);
4553 txbd = &bp->tx_desc_ring[ring_prod];
4555 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4556 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4557 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4558 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4560 last_frag = skb_shinfo(skb)->nr_frags;
4562 for (i = 0; i < last_frag; i++) {
4563 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4565 prod = NEXT_TX_BD(prod);
4566 ring_prod = TX_RING_IDX(prod);
4567 txbd = &bp->tx_desc_ring[ring_prod];
4570 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4571 len, PCI_DMA_TODEVICE);
4572 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4575 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4576 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4577 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4578 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4581 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4583 prod = NEXT_TX_BD(prod);
4584 bp->tx_prod_bseq += skb->len;
4586 REG_WR16(bp, bp->tx_bidx_addr, prod);
4587 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4592 dev->trans_start = jiffies;
4594 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4595 netif_stop_queue(dev);
4596 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4597 netif_wake_queue(dev);
4600 return NETDEV_TX_OK;
4603 /* Called with rtnl_lock */
4605 bnx2_close(struct net_device *dev)
4607 struct bnx2 *bp = netdev_priv(dev);
4610 /* Calling flush_scheduled_work() may deadlock because
4611 * linkwatch_event() may be on the workqueue and it will try to get
4612 * the rtnl_lock, which we are holding.
4614 while (bp->in_reset_task)
4617 bnx2_netif_stop(bp);
4618 del_timer_sync(&bp->timer);
4619 if (bp->flags & NO_WOL_FLAG)
4620 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4622 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4624 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4625 bnx2_reset_chip(bp, reset_code);
4626 free_irq(bp->pdev->irq, dev);
4627 if (bp->flags & USING_MSI_FLAG) {
4628 pci_disable_msi(bp->pdev);
4629 bp->flags &= ~USING_MSI_FLAG;
4634 netif_carrier_off(bp->dev);
4635 bnx2_set_power_state(bp, PCI_D3hot);
4639 #define GET_NET_STATS64(ctr) \
4640 (((unsigned long) (ctr##_hi) << 32) + \
4641 (unsigned long) (ctr##_lo))
4643 #define GET_NET_STATS32(ctr) \
4646 #if (BITS_PER_LONG == 64)
4647 #define GET_NET_STATS GET_NET_STATS64
4649 #define GET_NET_STATS GET_NET_STATS32
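/*
 * The chip keeps 64-bit counters as {_hi,_lo} word pairs in the
 * statistics block.  64-bit hosts splice both halves together, while
 * 32-bit hosts (GET_NET_STATS32) effectively report only the low
 * word and tolerate wraparound.
 */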
4652 static struct net_device_stats *
4653 bnx2_get_stats(struct net_device *dev)
4655 struct bnx2 *bp = netdev_priv(dev);
4656 struct statistics_block *stats_blk = bp->stats_blk;
4657 struct net_device_stats *net_stats = &bp->net_stats;
4659 if (bp->stats_blk == NULL) {
4662 net_stats->rx_packets =
4663 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4664 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4665 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4667 net_stats->tx_packets =
4668 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4669 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4670 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4672 net_stats->rx_bytes =
4673 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4675 net_stats->tx_bytes =
4676 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4678 net_stats->multicast =
4679 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4681 net_stats->collisions =
4682 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4684 net_stats->rx_length_errors =
4685 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4686 stats_blk->stat_EtherStatsOverrsizePkts);
4688 net_stats->rx_over_errors =
4689 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4691 net_stats->rx_frame_errors =
4692 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4694 net_stats->rx_crc_errors =
4695 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4697 net_stats->rx_errors = net_stats->rx_length_errors +
4698 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4699 net_stats->rx_crc_errors;
4701 net_stats->tx_aborted_errors =
4702 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4703 stats_blk->stat_Dot3StatsLateCollisions);
4705 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4706 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4707 net_stats->tx_carrier_errors = 0;
4709 net_stats->tx_carrier_errors =
4711 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4714 net_stats->tx_errors =
4716 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4718 net_stats->tx_aborted_errors +
4719 net_stats->tx_carrier_errors;
4721 net_stats->rx_missed_errors =
4722 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4723 stats_blk->stat_FwRxDrop);
4728 /* All ethtool functions called with rtnl_lock */
4731 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4733 struct bnx2 *bp = netdev_priv(dev);
4735 cmd->supported = SUPPORTED_Autoneg;
4736 if (bp->phy_flags & PHY_SERDES_FLAG) {
4737 cmd->supported |= SUPPORTED_1000baseT_Full |
4740 cmd->port = PORT_FIBRE;
4743 cmd->supported |= SUPPORTED_10baseT_Half |
4744 SUPPORTED_10baseT_Full |
4745 SUPPORTED_100baseT_Half |
4746 SUPPORTED_100baseT_Full |
4747 SUPPORTED_1000baseT_Full |
4750 cmd->port = PORT_TP;
4753 cmd->advertising = bp->advertising;
4755 if (bp->autoneg & AUTONEG_SPEED) {
4756 cmd->autoneg = AUTONEG_ENABLE;
4759 cmd->autoneg = AUTONEG_DISABLE;
4762 if (netif_carrier_ok(dev)) {
4763 cmd->speed = bp->line_speed;
4764 cmd->duplex = bp->duplex;
4771 cmd->transceiver = XCVR_INTERNAL;
4772 cmd->phy_address = bp->phy_addr;
4778 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4780 struct bnx2 *bp = netdev_priv(dev);
4781 u8 autoneg = bp->autoneg;
4782 u8 req_duplex = bp->req_duplex;
4783 u16 req_line_speed = bp->req_line_speed;
4784 u32 advertising = bp->advertising;
4786 if (cmd->autoneg == AUTONEG_ENABLE) {
4787 autoneg |= AUTONEG_SPEED;
4789 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4791 /* allow advertising a single speed */
4792 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4793 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4794 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4795 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4797 if (bp->phy_flags & PHY_SERDES_FLAG)
4800 advertising = cmd->advertising;
4803 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4804 advertising = cmd->advertising;
4806 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4810 if (bp->phy_flags & PHY_SERDES_FLAG) {
4811 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4814 advertising = ETHTOOL_ALL_COPPER_SPEED;
4817 advertising |= ADVERTISED_Autoneg;
4820 if (bp->phy_flags & PHY_SERDES_FLAG) {
4821 if ((cmd->speed != SPEED_1000 &&
4822 cmd->speed != SPEED_2500) ||
4823 (cmd->duplex != DUPLEX_FULL))
4826 if (cmd->speed == SPEED_2500 &&
4827 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4830 else if (cmd->speed == SPEED_1000) {
4833 autoneg &= ~AUTONEG_SPEED;
4834 req_line_speed = cmd->speed;
4835 req_duplex = cmd->duplex;
4839 bp->autoneg = autoneg;
4840 bp->advertising = advertising;
4841 bp->req_line_speed = req_line_speed;
4842 bp->req_duplex = req_duplex;
4844 spin_lock_bh(&bp->phy_lock);
4848 spin_unlock_bh(&bp->phy_lock);
4854 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4856 struct bnx2 *bp = netdev_priv(dev);
4858 strcpy(info->driver, DRV_MODULE_NAME);
4859 strcpy(info->version, DRV_MODULE_VERSION);
4860 strcpy(info->bus_info, pci_name(bp->pdev));
4861 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4862 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4863 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4864 info->fw_version[1] = info->fw_version[3] = '.';
4865 info->fw_version[5] = 0;
4868 #define BNX2_REGDUMP_LEN (32 * 1024)
4871 bnx2_get_regs_len(struct net_device *dev)
4873 return BNX2_REGDUMP_LEN;
4877 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4879 u32 *p = _p, i, offset;
4881 struct bnx2 *bp = netdev_priv(dev);
4882 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4883 0x0800, 0x0880, 0x0c00, 0x0c10,
4884 0x0c30, 0x0d08, 0x1000, 0x101c,
4885 0x1040, 0x1048, 0x1080, 0x10a4,
4886 0x1400, 0x1490, 0x1498, 0x14f0,
4887 0x1500, 0x155c, 0x1580, 0x15dc,
4888 0x1600, 0x1658, 0x1680, 0x16d8,
4889 0x1800, 0x1820, 0x1840, 0x1854,
4890 0x1880, 0x1894, 0x1900, 0x1984,
4891 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4892 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4893 0x2000, 0x2030, 0x23c0, 0x2400,
4894 0x2800, 0x2820, 0x2830, 0x2850,
4895 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4896 0x3c00, 0x3c94, 0x4000, 0x4010,
4897 0x4080, 0x4090, 0x43c0, 0x4458,
4898 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4899 0x4fc0, 0x5010, 0x53c0, 0x5444,
4900 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4901 0x5fc0, 0x6000, 0x6400, 0x6428,
4902 0x6800, 0x6848, 0x684c, 0x6860,
4903 0x6888, 0x6910, 0x8000 };
4907 memset(p, 0, BNX2_REGDUMP_LEN);
4909 if (!netif_running(bp->dev))
4913 offset = reg_boundaries[0];
4915 while (offset < BNX2_REGDUMP_LEN) {
4916 *p++ = REG_RD(bp, offset);
4918 if (offset == reg_boundaries[i + 1]) {
4919 offset = reg_boundaries[i + 2];
4920 p = (u32 *) (orig_p + offset);
4927 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4929 struct bnx2 *bp = netdev_priv(dev);
4931 if (bp->flags & NO_WOL_FLAG) {
4936 wol->supported = WAKE_MAGIC;
4938 wol->wolopts = WAKE_MAGIC;
4942 memset(&wol->sopass, 0, sizeof(wol->sopass));
4946 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4948 struct bnx2 *bp = netdev_priv(dev);
4950 if (wol->wolopts & ~WAKE_MAGIC)
4953 if (wol->wolopts & WAKE_MAGIC) {
4954 if (bp->flags & NO_WOL_FLAG)
4966 bnx2_nway_reset(struct net_device *dev)
4968 struct bnx2 *bp = netdev_priv(dev);
4971 if (!(bp->autoneg & AUTONEG_SPEED)) {
4975 spin_lock_bh(&bp->phy_lock);
4977 /* Force a link-down event visible to the link partner */
4978 if (bp->phy_flags & PHY_SERDES_FLAG) {
4979 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4980 spin_unlock_bh(&bp->phy_lock);
4984 spin_lock_bh(&bp->phy_lock);
4986 bp->current_interval = SERDES_AN_TIMEOUT;
4987 bp->serdes_an_pending = 1;
4988 mod_timer(&bp->timer, jiffies + bp->current_interval);
4991 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4992 bmcr &= ~BMCR_LOOPBACK;
4993 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4995 spin_unlock_bh(&bp->phy_lock);
5001 bnx2_get_eeprom_len(struct net_device *dev)
5003 struct bnx2 *bp = netdev_priv(dev);
5005 if (bp->flash_info == NULL)
5008 return (int) bp->flash_size;
5012 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5015 struct bnx2 *bp = netdev_priv(dev);
5018 /* parameters already validated in ethtool_get_eeprom */
5020 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5026 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5029 struct bnx2 *bp = netdev_priv(dev);
5032 /* parameters already validated in ethtool_set_eeprom */
5034 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5040 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5042 struct bnx2 *bp = netdev_priv(dev);
5044 memset(coal, 0, sizeof(struct ethtool_coalesce));
5046 coal->rx_coalesce_usecs = bp->rx_ticks;
5047 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5048 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5049 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5051 coal->tx_coalesce_usecs = bp->tx_ticks;
5052 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5053 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5054 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5056 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5062 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5064 struct bnx2 *bp = netdev_priv(dev);
5066 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5067 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5069 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5070 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5072 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5073 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5075 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5076 if (bp->rx_quick_cons_trip_int > 0xff)
5077 bp->rx_quick_cons_trip_int = 0xff;
5079 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5080 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5082 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5083 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5085 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5086 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5088 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5089 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = 0xff;
5092 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5093 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5094 bp->stats_ticks &= 0xffff00;
5096 if (netif_running(bp->dev)) {
5097 bnx2_netif_stop(bp);
5099 bnx2_netif_start(bp);
5106 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5108 struct bnx2 *bp = netdev_priv(dev);
5110 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5111 ering->rx_mini_max_pending = 0;
5112 ering->rx_jumbo_max_pending = 0;
5114 ering->rx_pending = bp->rx_ring_size;
5115 ering->rx_mini_pending = 0;
5116 ering->rx_jumbo_pending = 0;
5118 ering->tx_max_pending = MAX_TX_DESC_CNT;
5119 ering->tx_pending = bp->tx_ring_size;
5123 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5125 struct bnx2 *bp = netdev_priv(dev);
5127 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5128 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5129 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5133 if (netif_running(bp->dev)) {
5134 bnx2_netif_stop(bp);
5135 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5140 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5141 bp->tx_ring_size = ering->tx_pending;
5143 if (netif_running(bp->dev)) {
5146 rc = bnx2_alloc_mem(bp);
5150 bnx2_netif_start(bp);
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}

#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred_frames" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
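
/* STATS_OFFSET32() converts a field's byte offset inside struct
 * statistics_block into an index into the u32 view of the block used
 * by bnx2_get_ethtool_stats() below; e.g. a field at byte offset 16
 * yields hw_stats[4].
 */
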
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
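
/* Each entry is the width of the matching counter in
 * bnx2_stats_offset_arr: 8 for a 64-bit counter read as a hi/lo word
 * pair, 4 for a 32-bit counter, and 0 for a counter skipped on that
 * chip (the errata noted above).
 */
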
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
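
/* Result slots follow bnx2_tests_str_arr: buf[0..2] carry the offline
 * register, memory and loopback results, buf[3..5] the online NVRAM,
 * interrupt and link results.  From userspace, `ethtool -t <iface>
 * online` runs just the three online tests; the offline variant runs
 * all six after taking the NIC through the diagnostic reset above.
 */
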
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
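
/* The statistics block stores 64-bit counters as two consecutive
 * 32-bit words, high word first, which is why the 8-byte case above
 * shifts the word at the stats offset left by 32 and adds the word
 * that follows it.
 */
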
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
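
/* Backs `ethtool -p <iface> <seconds>`: the loop makes data * 2
 * half-second passes, alternating between the bare override bit and
 * all LED override bits, so the port LEDs blink roughly once per
 * second until the requested time (or a pending signal) ends the
 * identify sequence and the saved LED mode is restored.
 */
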
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BNX2_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
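
/* Netpoll hook: with the device's IRQ line masked, invoke the ISR
 * directly so paths that cannot take interrupts (netconsole,
 * kgdb-over-ethernet) can still drain the rings synchronously.
 */
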
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}

static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
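
	/* DMA policy: try a 64-bit mask first and remember success in
	 * USING_DAC_FLAG (probe later advertises NETIF_F_HIGHDMA from
	 * it), otherwise fall back to a 32-bit mask and fail the probe
	 * only if neither mask is usable.
	 */
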
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it
	 * using pci_msi_quirk.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
				PCI_DEVICE_ID_AMD_8132_BRIDGE,
				amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
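
	/* pci_get_device() iterates with reference counting: it drops
	 * the reference on the device passed in and returns the next
	 * match with an elevated count, so breaking out of the loop
	 * early requires the explicit pci_dev_put() above.
	 */
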
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
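
/* The error labels unwind in reverse order of setup: unmap the
 * register BAR, release the PCI regions, then disable the device, so
 * each failure site can jump to the label that undoes exactly what
 * had been acquired by that point.
 */
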
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BNX2_TSO
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
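
/* The reset code selects how the bootcode treats the port across the
 * suspend: force the link down when WOL is unavailable, otherwise
 * request a WOL-armed or plain suspend according to bp->wol.
 */
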
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);