1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.5.11"
58 #define DRV_MODULE_RELDATE "June 4, 2007"
60 #define RUN_AT(x) (jiffies + (x))
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
65 static const char version[] __devinitdata =
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int disable_msi = 0;
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
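/* Usage (illustrative): module_param() exposes disable_msi as a
 * standard module parameter, so MSI can be turned off at load time:
 *
 *     modprobe bnx2 disable_msi=1
 */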
90 /* indexed by board_t, above */
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105 static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
127 static struct flash_spec flash_table[] =
130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
202 /* Atmel Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
222 /* The ring uses 256 indices for 255 entries, one of which
223 * needs to be skipped.
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
231 return (bp->tx_ring_size - diff);
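/* Worked example (assuming TX_DESC_CNT = 256 and MAX_TX_DESC_CNT = 255,
 * consistent with the comment above): tx_prod and tx_cons are
 * free-running 16-bit indices, so the subtraction is wrap-safe.  With
 * tx_prod = 0x0100 and tx_cons = 0x0000 the ring is completely full:
 * diff comes out as TX_DESC_CNT (256) and is clamped to
 * MAX_TX_DESC_CNT (255) so that tx_ring_size - diff cannot underflow.
 */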
235 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
239 spin_lock_bh(&bp->indirect_lock);
240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
247 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
249 spin_lock_bh(&bp->indirect_lock);
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
252 spin_unlock_bh(&bp->indirect_lock);
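/* Typical use of the indirect window (sketch): these helpers reach
 * device addresses that are not directly BAR-mapped by writing the
 * target address to REG_WINDOW_ADDRESS and then accessing REG_WINDOW.
 * A read-modify-write would look like:
 *
 *     u32 v = bnx2_reg_rd_ind(bp, off);
 *     bnx2_reg_wr_ind(bp, off, v | some_bit);
 *
 * indirect_lock keeps the address/data register pair from being
 * interleaved by another context.
 */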
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
259 spin_lock_bh(&bp->indirect_lock);
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
277 spin_unlock_bh(&bp->indirect_lock);
281 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
301 for (i = 0; i < 50; i++) {
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
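/* Layout of the MDIO_COMM word built in these helpers (a clause 22
 * frame, as the shifts imply): bits 25:21 hold the PHY address,
 * bits 20:16 the register, bits 15:0 the data.  For example,
 * phy_addr = 1 and reg = MII_BMSR (0x01) yield 0x00210000 before the
 * command and START_BUSY flags are OR'ed in; the poll loop then waits
 * for hardware to clear START_BUSY.
 */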
338 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
358 for (i = 0; i < 50; i++) {
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
387 bnx2_disable_int(struct bnx2 *bp)
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
395 bnx2_enable_int(struct bnx2 *bp)
397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
408 bnx2_disable_int_sync(struct bnx2 *bp)
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
416 bnx2_netif_stop(struct bnx2 *bp)
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
427 bnx2_netif_start(struct bnx2 *bp)
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
439 bnx2_free_mem(struct bnx2 *bp)
443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
451 if (bp->status_blk) {
452 pci_free_consistent(bp->pdev, bp->status_stats_size,
453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
455 bp->stats_blk = NULL;
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
473 vfree(bp->rx_buf_ring);
474 bp->rx_buf_ring = NULL;
478 bnx2_alloc_mem(struct bnx2 *bp)
480 int i, status_blk_size;
482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
484 if (bp->tx_buf_ring == NULL)
487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
496 if (bp->rx_buf_ring == NULL)
499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
522 memset(bp->status_blk, 0, bp->status_stats_size);
524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
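/* Resulting layout of the combined DMA allocation (illustrative):
 *
 *   status_blk_mapping -> | struct status_block (cache-aligned size) |
 *   stats_blk_mapping  -> | struct statistics_block                  |
 *
 * A single pci_alloc_consistent() call backs both blocks, and the
 * L1_CACHE_ALIGN() above keeps the statistics block on its own
 * cache-line boundary.
 */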
529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
549 bnx2_report_fw_link(struct bnx2 *bp)
551 u32 fw_link_status = 0;
556 switch (bp->line_speed) {
558 if (bp->duplex == DUPLEX_HALF)
559 fw_link_status = BNX2_LINK_STATUS_10HALF;
561 fw_link_status = BNX2_LINK_STATUS_10FULL;
564 if (bp->duplex == DUPLEX_HALF)
565 fw_link_status = BNX2_LINK_STATUS_100HALF;
567 fw_link_status = BNX2_LINK_STATUS_100FULL;
570 if (bp->duplex == DUPLEX_HALF)
571 fw_link_status = BNX2_LINK_STATUS_1000HALF;
573 fw_link_status = BNX2_LINK_STATUS_1000FULL;
576 if (bp->duplex == DUPLEX_HALF)
577 fw_link_status = BNX2_LINK_STATUS_2500HALF;
579 fw_link_status = BNX2_LINK_STATUS_2500FULL;
583 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
586 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
588 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
589 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
591 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
592 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
593 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
595 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
599 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
601 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
605 bnx2_report_link(struct bnx2 *bp)
608 netif_carrier_on(bp->dev);
609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
611 printk("%d Mbps ", bp->line_speed);
613 if (bp->duplex == DUPLEX_FULL)
614 printk("full duplex");
616 printk("half duplex");
619 if (bp->flow_ctrl & FLOW_CTRL_RX) {
620 printk(", receive ");
621 if (bp->flow_ctrl & FLOW_CTRL_TX)
622 printk("& transmit ");
625 printk(", transmit ");
627 printk("flow control ON");
632 netif_carrier_off(bp->dev);
633 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
636 bnx2_report_fw_link(bp);
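/* The printks in bnx2_report_link() assemble into one console line,
 * e.g. (illustrative; interface name and modes vary):
 *
 *   bnx2: eth0 NIC Link is Up, 1000 Mbps full duplex, receive & transmit flow control ON
 */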
640 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
642 u32 local_adv, remote_adv;
645 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
646 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
648 if (bp->duplex == DUPLEX_FULL) {
649 bp->flow_ctrl = bp->req_flow_ctrl;
654 if (bp->duplex != DUPLEX_FULL) {
658 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
659 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
662 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
663 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
664 bp->flow_ctrl |= FLOW_CTRL_TX;
665 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
666 bp->flow_ctrl |= FLOW_CTRL_RX;
670 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
671 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
673 if (bp->phy_flags & PHY_SERDES_FLAG) {
674 u32 new_local_adv = 0;
675 u32 new_remote_adv = 0;
677 if (local_adv & ADVERTISE_1000XPAUSE)
678 new_local_adv |= ADVERTISE_PAUSE_CAP;
679 if (local_adv & ADVERTISE_1000XPSE_ASYM)
680 new_local_adv |= ADVERTISE_PAUSE_ASYM;
681 if (remote_adv & ADVERTISE_1000XPAUSE)
682 new_remote_adv |= ADVERTISE_PAUSE_CAP;
683 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
684 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
686 local_adv = new_local_adv;
687 remote_adv = new_remote_adv;
690 /* See Table 28B-3 of 802.3ab-1999 spec. */
691 if (local_adv & ADVERTISE_PAUSE_CAP) {
692 if(local_adv & ADVERTISE_PAUSE_ASYM) {
693 if (remote_adv & ADVERTISE_PAUSE_CAP) {
694 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
696 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
697 bp->flow_ctrl = FLOW_CTRL_RX;
701 if (remote_adv & ADVERTISE_PAUSE_CAP) {
702 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
706 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
707 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
708 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
710 bp->flow_ctrl = FLOW_CTRL_TX;
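/* Summary of the resolution above, per Table 28B-3 (after the SerDes
 * 1000X pause bits have been remapped to the copper ADVERTISE_PAUSE_*
 * encoding):
 *
 *   local CAP,       remote CAP        -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *   local CAP+ASYM,  remote ASYM only  -> FLOW_CTRL_RX
 *   local ASYM only, remote CAP+ASYM   -> FLOW_CTRL_TX
 *   any other combination              -> no pause in either direction
 */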
716 bnx2_5709s_linkup(struct bnx2 *bp)
722 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
723 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
724 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
726 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
727 bp->line_speed = bp->req_line_speed;
728 bp->duplex = bp->req_duplex;
731 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
733 case MII_BNX2_GP_TOP_AN_SPEED_10:
734 bp->line_speed = SPEED_10;
736 case MII_BNX2_GP_TOP_AN_SPEED_100:
737 bp->line_speed = SPEED_100;
739 case MII_BNX2_GP_TOP_AN_SPEED_1G:
740 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
741 bp->line_speed = SPEED_1000;
743 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
744 bp->line_speed = SPEED_2500;
747 if (val & MII_BNX2_GP_TOP_AN_FD)
748 bp->duplex = DUPLEX_FULL;
750 bp->duplex = DUPLEX_HALF;
755 bnx2_5708s_linkup(struct bnx2 *bp)
760 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
761 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
762 case BCM5708S_1000X_STAT1_SPEED_10:
763 bp->line_speed = SPEED_10;
765 case BCM5708S_1000X_STAT1_SPEED_100:
766 bp->line_speed = SPEED_100;
768 case BCM5708S_1000X_STAT1_SPEED_1G:
769 bp->line_speed = SPEED_1000;
771 case BCM5708S_1000X_STAT1_SPEED_2G5:
772 bp->line_speed = SPEED_2500;
775 if (val & BCM5708S_1000X_STAT1_FD)
776 bp->duplex = DUPLEX_FULL;
778 bp->duplex = DUPLEX_HALF;
784 bnx2_5706s_linkup(struct bnx2 *bp)
786 u32 bmcr, local_adv, remote_adv, common;
789 bp->line_speed = SPEED_1000;
791 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
792 if (bmcr & BMCR_FULLDPLX) {
793 bp->duplex = DUPLEX_FULL;
796 bp->duplex = DUPLEX_HALF;
799 if (!(bmcr & BMCR_ANENABLE)) {
803 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
806 common = local_adv & remote_adv;
807 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
809 if (common & ADVERTISE_1000XFULL) {
810 bp->duplex = DUPLEX_FULL;
813 bp->duplex = DUPLEX_HALF;
821 bnx2_copper_linkup(struct bnx2 *bp)
825 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
826 if (bmcr & BMCR_ANENABLE) {
827 u32 local_adv, remote_adv, common;
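/* Note on the "remote_adv >> 2" below: the link partner's 1000BASE-T
 * ability bits in MII_STAT1000 sit two bit positions above the
 * corresponding advertisement bits in MII_CTRL1000 (LPA_1000FULL is
 * 0x0800 while ADVERTISE_1000FULL is 0x0200), so a right shift of 2
 * lines the two masks up for a direct AND.
 */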
829 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
830 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
832 common = local_adv & (remote_adv >> 2);
833 if (common & ADVERTISE_1000FULL) {
834 bp->line_speed = SPEED_1000;
835 bp->duplex = DUPLEX_FULL;
837 else if (common & ADVERTISE_1000HALF) {
838 bp->line_speed = SPEED_1000;
839 bp->duplex = DUPLEX_HALF;
842 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
843 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
845 common = local_adv & remote_adv;
846 if (common & ADVERTISE_100FULL) {
847 bp->line_speed = SPEED_100;
848 bp->duplex = DUPLEX_FULL;
850 else if (common & ADVERTISE_100HALF) {
851 bp->line_speed = SPEED_100;
852 bp->duplex = DUPLEX_HALF;
854 else if (common & ADVERTISE_10FULL) {
855 bp->line_speed = SPEED_10;
856 bp->duplex = DUPLEX_FULL;
858 else if (common & ADVERTISE_10HALF) {
859 bp->line_speed = SPEED_10;
860 bp->duplex = DUPLEX_HALF;
869 if (bmcr & BMCR_SPEED100) {
870 bp->line_speed = SPEED_100;
873 bp->line_speed = SPEED_10;
875 if (bmcr & BMCR_FULLDPLX) {
876 bp->duplex = DUPLEX_FULL;
879 bp->duplex = DUPLEX_HALF;
887 bnx2_set_mac_link(struct bnx2 *bp)
891 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
892 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
893 (bp->duplex == DUPLEX_HALF)) {
894 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
897 /* Configure the EMAC mode register. */
898 val = REG_RD(bp, BNX2_EMAC_MODE);
900 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
901 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
902 BNX2_EMAC_MODE_25G_MODE);
905 switch (bp->line_speed) {
907 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
908 val |= BNX2_EMAC_MODE_PORT_MII_10M;
913 val |= BNX2_EMAC_MODE_PORT_MII;
916 val |= BNX2_EMAC_MODE_25G_MODE;
919 val |= BNX2_EMAC_MODE_PORT_GMII;
924 val |= BNX2_EMAC_MODE_PORT_GMII;
927 /* Set the MAC to operate in the appropriate duplex mode. */
928 if (bp->duplex == DUPLEX_HALF)
929 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
930 REG_WR(bp, BNX2_EMAC_MODE, val);
932 /* Enable/disable rx PAUSE. */
933 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
935 if (bp->flow_ctrl & FLOW_CTRL_RX)
936 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
937 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
939 /* Enable/disable tx PAUSE. */
940 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
941 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
943 if (bp->flow_ctrl & FLOW_CTRL_TX)
944 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
945 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
947 /* Acknowledge the interrupt. */
948 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
954 bnx2_enable_bmsr1(struct bnx2 *bp)
956 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957 (CHIP_NUM(bp) == CHIP_NUM_5709))
958 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959 MII_BNX2_BLK_ADDR_GP_STATUS);
963 bnx2_disable_bmsr1(struct bnx2 *bp)
965 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966 (CHIP_NUM(bp) == CHIP_NUM_5709))
967 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
972 bnx2_test_and_enable_2g5(struct bnx2 *bp)
977 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
980 if (bp->autoneg & AUTONEG_SPEED)
981 bp->advertising |= ADVERTISED_2500baseX_Full;
983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
986 bnx2_read_phy(bp, bp->mii_up1, &up1);
987 if (!(up1 & BCM5708S_UP1_2G5)) {
988 up1 |= BCM5708S_UP1_2G5;
989 bnx2_write_phy(bp, bp->mii_up1, up1);
993 if (CHIP_NUM(bp) == CHIP_NUM_5709)
994 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1001 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1006 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (up1 & BCM5708S_UP1_2G5) {
1014 up1 &= ~BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1027 bnx2_enable_forced_2g5(struct bnx2 *bp)
1031 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1034 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1037 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038 MII_BNX2_BLK_ADDR_SERDES_DIG);
1039 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1044 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1048 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1049 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050 bmcr |= BCM5708S_BMCR_FORCE_2500;
1053 if (bp->autoneg & AUTONEG_SPEED) {
1054 bmcr &= ~BMCR_ANENABLE;
1055 if (bp->req_duplex == DUPLEX_FULL)
1056 bmcr |= BMCR_FULLDPLX;
1058 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1062 bnx2_disable_forced_2g5(struct bnx2 *bp)
1066 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1069 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1072 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073 MII_BNX2_BLK_ADDR_SERDES_DIG);
1074 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1078 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1082 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1083 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1087 if (bp->autoneg & AUTONEG_SPEED)
1088 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1093 bnx2_set_link(struct bnx2 *bp)
1098 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1103 link_up = bp->link_up;
1105 bnx2_enable_bmsr1(bp);
1106 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1107 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1108 bnx2_disable_bmsr1(bp);
1110 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1111 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1114 val = REG_RD(bp, BNX2_EMAC_STATUS);
1115 if (val & BNX2_EMAC_STATUS_LINK)
1116 bmsr |= BMSR_LSTATUS;
1118 bmsr &= ~BMSR_LSTATUS;
1121 if (bmsr & BMSR_LSTATUS) {
1124 if (bp->phy_flags & PHY_SERDES_FLAG) {
1125 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1126 bnx2_5706s_linkup(bp);
1127 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1128 bnx2_5708s_linkup(bp);
1129 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1130 bnx2_5709s_linkup(bp);
1133 bnx2_copper_linkup(bp);
1135 bnx2_resolve_flow_ctrl(bp);
1138 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1139 (bp->autoneg & AUTONEG_SPEED))
1140 bnx2_disable_forced_2g5(bp);
1142 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1146 if (bp->link_up != link_up) {
1147 bnx2_report_link(bp);
1150 bnx2_set_mac_link(bp);
1156 bnx2_reset_phy(struct bnx2 *bp)
1161 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1163 #define PHY_RESET_MAX_WAIT 100
1164 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1167 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1168 if (!(reg & BMCR_RESET)) {
1173 if (i == PHY_RESET_MAX_WAIT) {
1180 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1184 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1187 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188 adv = ADVERTISE_1000XPAUSE;
1191 adv = ADVERTISE_PAUSE_CAP;
1194 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196 adv = ADVERTISE_1000XPSE_ASYM;
1199 adv = ADVERTISE_PAUSE_ASYM;
1202 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1207 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1214 bnx2_setup_serdes_phy(struct bnx2 *bp)
1219 if (!(bp->autoneg & AUTONEG_SPEED)) {
1221 int force_link_down = 0;
1223 if (bp->req_line_speed == SPEED_2500) {
1224 if (!bnx2_test_and_enable_2g5(bp))
1225 force_link_down = 1;
1226 } else if (bp->req_line_speed == SPEED_1000) {
1227 if (bnx2_test_and_disable_2g5(bp))
1228 force_link_down = 1;
1230 bnx2_read_phy(bp, bp->mii_adv, &adv);
1231 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1233 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1234 new_bmcr = bmcr & ~BMCR_ANENABLE;
1235 new_bmcr |= BMCR_SPEED1000;
1237 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1238 if (bp->req_line_speed == SPEED_2500)
1239 bnx2_enable_forced_2g5(bp);
1240 else if (bp->req_line_speed == SPEED_1000) {
1241 bnx2_disable_forced_2g5(bp);
1242 new_bmcr &= ~0x2000;
1245 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1246 if (bp->req_line_speed == SPEED_2500)
1247 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1249 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1252 if (bp->req_duplex == DUPLEX_FULL) {
1253 adv |= ADVERTISE_1000XFULL;
1254 new_bmcr |= BMCR_FULLDPLX;
1257 adv |= ADVERTISE_1000XHALF;
1258 new_bmcr &= ~BMCR_FULLDPLX;
1260 if ((new_bmcr != bmcr) || (force_link_down)) {
1261 /* Force a link down visible on the other side */
1263 bnx2_write_phy(bp, bp->mii_adv, adv &
1264 ~(ADVERTISE_1000XFULL |
1265 ADVERTISE_1000XHALF));
1266 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1267 BMCR_ANRESTART | BMCR_ANENABLE);
1270 netif_carrier_off(bp->dev);
1271 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1272 bnx2_report_link(bp);
1274 bnx2_write_phy(bp, bp->mii_adv, adv);
1275 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1277 bnx2_resolve_flow_ctrl(bp);
1278 bnx2_set_mac_link(bp);
1283 bnx2_test_and_enable_2g5(bp);
1285 if (bp->advertising & ADVERTISED_1000baseT_Full)
1286 new_adv |= ADVERTISE_1000XFULL;
1288 new_adv |= bnx2_phy_get_pause_adv(bp);
1290 bnx2_read_phy(bp, bp->mii_adv, &adv);
1291 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1293 bp->serdes_an_pending = 0;
1294 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1295 /* Force a link down visible on the other side */
1297 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1298 spin_unlock_bh(&bp->phy_lock);
1300 spin_lock_bh(&bp->phy_lock);
1303 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1304 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1306 /* Speed up link-up time when the link partner
1307 * does not autonegotiate, which is very common
1308 * in blade servers. Some blade servers use
1309 * IPMI for keyboard input and it's important
1310 * to minimize link disruptions. Autoneg. involves
1311 * exchanging base pages plus 3 next pages and
1312 * normally completes in about 120 msec.
1314 bp->current_interval = SERDES_AN_TIMEOUT;
1315 bp->serdes_an_pending = 1;
1316 mod_timer(&bp->timer, jiffies + bp->current_interval);
1318 bnx2_resolve_flow_ctrl(bp);
1319 bnx2_set_mac_link(bp);
1325 #define ETHTOOL_ALL_FIBRE_SPEED \
1326 (ADVERTISED_1000baseT_Full)
1328 #define ETHTOOL_ALL_COPPER_SPEED \
1329 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1330 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1331 ADVERTISED_1000baseT_Full)
1333 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1334 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1336 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1339 bnx2_setup_copper_phy(struct bnx2 *bp)
1344 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1346 if (bp->autoneg & AUTONEG_SPEED) {
1347 u32 adv_reg, adv1000_reg;
1348 u32 new_adv_reg = 0;
1349 u32 new_adv1000_reg = 0;
1351 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1352 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1353 ADVERTISE_PAUSE_ASYM);
1355 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1356 adv1000_reg &= PHY_ALL_1000_SPEED;
1358 if (bp->advertising & ADVERTISED_10baseT_Half)
1359 new_adv_reg |= ADVERTISE_10HALF;
1360 if (bp->advertising & ADVERTISED_10baseT_Full)
1361 new_adv_reg |= ADVERTISE_10FULL;
1362 if (bp->advertising & ADVERTISED_100baseT_Half)
1363 new_adv_reg |= ADVERTISE_100HALF;
1364 if (bp->advertising & ADVERTISED_100baseT_Full)
1365 new_adv_reg |= ADVERTISE_100FULL;
1366 if (bp->advertising & ADVERTISED_1000baseT_Full)
1367 new_adv1000_reg |= ADVERTISE_1000FULL;
1369 new_adv_reg |= ADVERTISE_CSMA;
1371 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1373 if ((adv1000_reg != new_adv1000_reg) ||
1374 (adv_reg != new_adv_reg) ||
1375 ((bmcr & BMCR_ANENABLE) == 0)) {
1377 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1378 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1379 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1382 else if (bp->link_up) {
1383 /* Flow ctrl may have changed from auto to forced */
1384 /* or vice-versa. */
1386 bnx2_resolve_flow_ctrl(bp);
1387 bnx2_set_mac_link(bp);
1393 if (bp->req_line_speed == SPEED_100) {
1394 new_bmcr |= BMCR_SPEED100;
1396 if (bp->req_duplex == DUPLEX_FULL) {
1397 new_bmcr |= BMCR_FULLDPLX;
1399 if (new_bmcr != bmcr) {
1402 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1403 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1405 if (bmsr & BMSR_LSTATUS) {
1406 /* Force link down */
1407 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1408 spin_unlock_bh(&bp->phy_lock);
1410 spin_lock_bh(&bp->phy_lock);
1412 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1413 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1416 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1418 /* Normally, the new speed is set up after the link has
1419 * gone down and up again. In some cases, link will not go
1420 * down so we need to set up the new speed here.
1422 if (bmsr & BMSR_LSTATUS) {
1423 bp->line_speed = bp->req_line_speed;
1424 bp->duplex = bp->req_duplex;
1425 bnx2_resolve_flow_ctrl(bp);
1426 bnx2_set_mac_link(bp);
1429 bnx2_resolve_flow_ctrl(bp);
1430 bnx2_set_mac_link(bp);
1436 bnx2_setup_phy(struct bnx2 *bp)
1438 if (bp->loopback == MAC_LOOPBACK)
1441 if (bp->phy_flags & PHY_SERDES_FLAG) {
1442 return (bnx2_setup_serdes_phy(bp));
1445 return (bnx2_setup_copper_phy(bp));
1450 bnx2_init_5709s_phy(struct bnx2 *bp)
1454 bp->mii_bmcr = MII_BMCR + 0x10;
1455 bp->mii_bmsr = MII_BMSR + 0x10;
1456 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1457 bp->mii_adv = MII_ADVERTISE + 0x10;
1458 bp->mii_lpa = MII_LPA + 0x10;
1459 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1461 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1462 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1464 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1469 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1470 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1471 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1472 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1474 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1475 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1476 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1477 val |= BCM5708S_UP1_2G5;
1479 val &= ~BCM5708S_UP1_2G5;
1480 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1482 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1483 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1484 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1485 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1487 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1489 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1490 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1491 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1493 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1499 bnx2_init_5708s_phy(struct bnx2 *bp)
1505 bp->mii_up1 = BCM5708S_UP1;
1507 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1508 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1509 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1511 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1512 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1513 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1515 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1516 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1517 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1519 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1520 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1521 val |= BCM5708S_UP1_2G5;
1522 bnx2_write_phy(bp, BCM5708S_UP1, val);
1525 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1526 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1527 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1528 /* increase tx signal amplitude */
1529 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1530 BCM5708S_BLK_ADDR_TX_MISC);
1531 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1532 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1533 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1534 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1537 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1538 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1543 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1544 BNX2_SHARED_HW_CFG_CONFIG);
1545 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1546 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1547 BCM5708S_BLK_ADDR_TX_MISC);
1548 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1549 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1550 BCM5708S_BLK_ADDR_DIG);
1557 bnx2_init_5706s_phy(struct bnx2 *bp)
1561 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1563 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1564 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1566 if (bp->dev->mtu > 1500) {
1569 /* Set extended packet length bit */
1570 bnx2_write_phy(bp, 0x18, 0x7);
1571 bnx2_read_phy(bp, 0x18, &val);
1572 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1574 bnx2_write_phy(bp, 0x1c, 0x6c00);
1575 bnx2_read_phy(bp, 0x1c, &val);
1576 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1581 bnx2_write_phy(bp, 0x18, 0x7);
1582 bnx2_read_phy(bp, 0x18, &val);
1583 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1585 bnx2_write_phy(bp, 0x1c, 0x6c00);
1586 bnx2_read_phy(bp, 0x1c, &val);
1587 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1594 bnx2_init_copper_phy(struct bnx2 *bp)
1600 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1601 bnx2_write_phy(bp, 0x18, 0x0c00);
1602 bnx2_write_phy(bp, 0x17, 0x000a);
1603 bnx2_write_phy(bp, 0x15, 0x310b);
1604 bnx2_write_phy(bp, 0x17, 0x201f);
1605 bnx2_write_phy(bp, 0x15, 0x9506);
1606 bnx2_write_phy(bp, 0x17, 0x401f);
1607 bnx2_write_phy(bp, 0x15, 0x14e2);
1608 bnx2_write_phy(bp, 0x18, 0x0400);
1611 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1612 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1613 MII_BNX2_DSP_EXPAND_REG | 0x8);
1614 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1616 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1619 if (bp->dev->mtu > 1500) {
1620 /* Set extended packet length bit */
1621 bnx2_write_phy(bp, 0x18, 0x7);
1622 bnx2_read_phy(bp, 0x18, &val);
1623 bnx2_write_phy(bp, 0x18, val | 0x4000);
1625 bnx2_read_phy(bp, 0x10, &val);
1626 bnx2_write_phy(bp, 0x10, val | 0x1);
1629 bnx2_write_phy(bp, 0x18, 0x7);
1630 bnx2_read_phy(bp, 0x18, &val);
1631 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1633 bnx2_read_phy(bp, 0x10, &val);
1634 bnx2_write_phy(bp, 0x10, val & ~0x1);
1637 /* ethernet@wirespeed */
1638 bnx2_write_phy(bp, 0x18, 0x7007);
1639 bnx2_read_phy(bp, 0x18, &val);
1640 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1646 bnx2_init_phy(struct bnx2 *bp)
1651 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1652 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1654 bp->mii_bmcr = MII_BMCR;
1655 bp->mii_bmsr = MII_BMSR;
1656 bp->mii_bmsr1 = MII_BMSR;
1657 bp->mii_adv = MII_ADVERTISE;
1658 bp->mii_lpa = MII_LPA;
1660 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1662 bnx2_read_phy(bp, MII_PHYSID1, &val);
1663 bp->phy_id = val << 16;
1664 bnx2_read_phy(bp, MII_PHYSID2, &val);
1665 bp->phy_id |= val & 0xffff;
1667 if (bp->phy_flags & PHY_SERDES_FLAG) {
1668 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1669 rc = bnx2_init_5706s_phy(bp);
1670 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1671 rc = bnx2_init_5708s_phy(bp);
1672 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1673 rc = bnx2_init_5709s_phy(bp);
1676 rc = bnx2_init_copper_phy(bp);
1685 bnx2_set_mac_loopback(struct bnx2 *bp)
1689 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1690 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1691 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1692 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1697 static int bnx2_test_link(struct bnx2 *);
1700 bnx2_set_phy_loopback(struct bnx2 *bp)
1705 spin_lock_bh(&bp->phy_lock);
1706 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1708 spin_unlock_bh(&bp->phy_lock);
1712 for (i = 0; i < 10; i++) {
1713 if (bnx2_test_link(bp) == 0)
1718 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1719 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1720 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1721 BNX2_EMAC_MODE_25G_MODE);
1723 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1724 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
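/* bnx2_fw_sync() below implements the driver<->bootcode mailbox
 * handshake (sketch of the protocol as this code uses it): the driver
 * writes the message OR'ed with a sequence number into the
 * BNX2_DRV_MB word in shared memory, then polls BNX2_FW_MB until the
 * firmware echoes the sequence number in its ACK field, giving up
 * after FW_ACK_TIME_OUT_MS and reporting the timeout back to the
 * firmware.
 */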
1730 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1736 msg_data |= bp->fw_wr_seq;
1738 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1740 /* wait for an acknowledgement. */
1741 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1744 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1746 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1749 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1752 /* If we timed out, inform the firmware that this is the case. */
1753 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1755 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1758 msg_data &= ~BNX2_DRV_MSG_CODE;
1759 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1761 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1766 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1773 bnx2_init_5709_context(struct bnx2 *bp)
1778 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1779 val |= (BCM_PAGE_BITS - 8) << 16;
1780 REG_WR(bp, BNX2_CTX_COMMAND, val);
1781 for (i = 0; i < 10; i++) {
1782 val = REG_RD(bp, BNX2_CTX_COMMAND);
1783 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
1787 if (val & BNX2_CTX_COMMAND_MEM_INIT)
1790 for (i = 0; i < bp->ctx_pages; i++) {
1793 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1794 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1795 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1796 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1797 (u64) bp->ctx_blk_mapping[i] >> 32);
1798 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1799 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1800 for (j = 0; j < 10; j++) {
1802 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1803 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1807 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1816 bnx2_init_context(struct bnx2 *bp)
1822 u32 vcid_addr, pcid_addr, offset;
1827 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1830 vcid_addr = GET_PCID_ADDR(vcid);
1832 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1837 pcid_addr = GET_PCID_ADDR(new_vcid);
1840 vcid_addr = GET_CID_ADDR(vcid);
1841 pcid_addr = vcid_addr;
1844 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
1845 vcid_addr += (i << PHY_CTX_SHIFT);
1846 pcid_addr += (i << PHY_CTX_SHIFT);
1848 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1849 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1851 /* Zero out the context. */
1852 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
1853 CTX_WR(bp, 0x00, offset, 0);
1855 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1856 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1862 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1868 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1869 if (good_mbuf == NULL) {
1870 printk(KERN_ERR PFX "Failed to allocate memory in "
1871 "bnx2_alloc_bad_rbuf\n");
1875 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1876 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1880 /* Allocate a bunch of mbufs and save the good ones in an array. */
1881 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1882 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1883 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1885 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1887 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1889 /* The addresses with Bit 9 set are bad memory blocks. */
1890 if (!(val & (1 << 9))) {
1891 good_mbuf[good_mbuf_cnt] = (u16) val;
1895 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1898 /* Free the good ones back to the mbuf pool, thus discarding
1899 * all the bad ones. */
1900 while (good_mbuf_cnt) {
1903 val = good_mbuf[good_mbuf_cnt];
1904 val = (val << 9) | val | 1;
1906 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1913 bnx2_set_mac_addr(struct bnx2 *bp)
1916 u8 *mac_addr = bp->dev->dev_addr;
1918 val = (mac_addr[0] << 8) | mac_addr[1];
1920 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1922 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1923 (mac_addr[4] << 8) | mac_addr[5];
1925 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
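/* Example (illustrative): for dev_addr 00:10:18:ab:cd:ef the writes
 * above are MAC_MATCH0 = 0x00000010 (two high octets) and
 * MAC_MATCH1 = 0x18abcdef (four low octets).
 */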
1929 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1931 struct sk_buff *skb;
1932 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1934 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1935 unsigned long align;
1937 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1942 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1943 skb_reserve(skb, BNX2_RX_ALIGN - align);
1945 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1946 PCI_DMA_FROMDEVICE);
1949 pci_unmap_addr_set(rx_buf, mapping, mapping);
1951 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1952 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1954 bp->rx_prod_bseq += bp->rx_buf_use_size;
1960 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
1962 struct status_block *sblk = bp->status_blk;
1963 u32 new_link_state, old_link_state;
1966 new_link_state = sblk->status_attn_bits & event;
1967 old_link_state = sblk->status_attn_bits_ack & event;
1968 if (new_link_state != old_link_state) {
1970 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
1972 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
1980 bnx2_phy_int(struct bnx2 *bp)
1982 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
1983 spin_lock(&bp->phy_lock);
1985 spin_unlock(&bp->phy_lock);
1990 bnx2_tx_int(struct bnx2 *bp)
1992 struct status_block *sblk = bp->status_blk;
1993 u16 hw_cons, sw_cons, sw_ring_cons;
1996 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1997 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2000 sw_cons = bp->tx_cons;
2002 while (sw_cons != hw_cons) {
2003 struct sw_bd *tx_buf;
2004 struct sk_buff *skb;
2007 sw_ring_cons = TX_RING_IDX(sw_cons);
2009 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2012 /* partial BD completions possible with TSO packets */
2013 if (skb_is_gso(skb)) {
2014 u16 last_idx, last_ring_idx;
2016 last_idx = sw_cons +
2017 skb_shinfo(skb)->nr_frags + 1;
2018 last_ring_idx = sw_ring_cons +
2019 skb_shinfo(skb)->nr_frags + 1;
2020 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2023 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2028 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2029 skb_headlen(skb), PCI_DMA_TODEVICE);
2032 last = skb_shinfo(skb)->nr_frags;
2034 for (i = 0; i < last; i++) {
2035 sw_cons = NEXT_TX_BD(sw_cons);
2037 pci_unmap_page(bp->pdev,
2039 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2041 skb_shinfo(skb)->frags[i].size,
2045 sw_cons = NEXT_TX_BD(sw_cons);
2047 tx_free_bd += last + 1;
2051 hw_cons = bp->hw_tx_cons =
2052 sblk->status_tx_quick_consumer_index0;
2054 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2059 bp->tx_cons = sw_cons;
2060 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2061 * before checking for netif_queue_stopped(). Without the
2062 * memory barrier, there is a small possibility that bnx2_start_xmit()
2063 * will miss it and cause the queue to be stopped forever.
2067 if (unlikely(netif_queue_stopped(bp->dev)) &&
2068 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2069 netif_tx_lock(bp->dev);
2070 if ((netif_queue_stopped(bp->dev)) &&
2071 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2072 netif_wake_queue(bp->dev);
2073 netif_tx_unlock(bp->dev);
2078 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2081 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2082 struct rx_bd *cons_bd, *prod_bd;
2084 cons_rx_buf = &bp->rx_buf_ring[cons];
2085 prod_rx_buf = &bp->rx_buf_ring[prod];
2087 pci_dma_sync_single_for_device(bp->pdev,
2088 pci_unmap_addr(cons_rx_buf, mapping),
2089 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2091 bp->rx_prod_bseq += bp->rx_buf_use_size;
2093 prod_rx_buf->skb = skb;
2098 pci_unmap_addr_set(prod_rx_buf, mapping,
2099 pci_unmap_addr(cons_rx_buf, mapping));
2101 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2102 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2103 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2104 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2108 bnx2_rx_int(struct bnx2 *bp, int budget)
2110 struct status_block *sblk = bp->status_blk;
2111 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2112 struct l2_fhdr *rx_hdr;
2115 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2116 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2119 sw_cons = bp->rx_cons;
2120 sw_prod = bp->rx_prod;
2122 /* Memory barrier necessary as speculative reads of the rx
2123 * buffer can be ahead of the index in the status block
2126 while (sw_cons != hw_cons) {
2129 struct sw_bd *rx_buf;
2130 struct sk_buff *skb;
2131 dma_addr_t dma_addr;
2133 sw_ring_cons = RX_RING_IDX(sw_cons);
2134 sw_ring_prod = RX_RING_IDX(sw_prod);
2136 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2141 dma_addr = pci_unmap_addr(rx_buf, mapping);
2143 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2144 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2146 rx_hdr = (struct l2_fhdr *) skb->data;
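/* l2_fhdr_pkt_len counts the frame as received on the wire,
 * including the 4-byte CRC, hence the subtraction below. */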
2147 len = rx_hdr->l2_fhdr_pkt_len - 4;
2149 if ((status = rx_hdr->l2_fhdr_status) &
2150 (L2_FHDR_ERRORS_BAD_CRC |
2151 L2_FHDR_ERRORS_PHY_DECODE |
2152 L2_FHDR_ERRORS_ALIGNMENT |
2153 L2_FHDR_ERRORS_TOO_SHORT |
2154 L2_FHDR_ERRORS_GIANT_FRAME)) {
2159 /* Since we don't have a jumbo ring, copy small packets
2162 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2163 struct sk_buff *new_skb;
2165 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2166 if (new_skb == NULL)
2170 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2171 new_skb->data, len + 2);
2172 skb_reserve(new_skb, 2);
2173 skb_put(new_skb, len);
2175 bnx2_reuse_rx_skb(bp, skb,
2176 sw_ring_cons, sw_ring_prod);
2180 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2181 pci_unmap_single(bp->pdev, dma_addr,
2182 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2184 skb_reserve(skb, bp->rx_offset);
2189 bnx2_reuse_rx_skb(bp, skb,
2190 sw_ring_cons, sw_ring_prod);
2194 skb->protocol = eth_type_trans(skb, bp->dev);
2196 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2197 (ntohs(skb->protocol) != 0x8100)) {
2204 skb->ip_summed = CHECKSUM_NONE;
2206 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2207 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2209 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2210 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2211 skb->ip_summed = CHECKSUM_UNNECESSARY;
2215 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2216 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2217 rx_hdr->l2_fhdr_vlan_tag);
2221 netif_receive_skb(skb);
2223 bp->dev->last_rx = jiffies;
2227 sw_cons = NEXT_RX_BD(sw_cons);
2228 sw_prod = NEXT_RX_BD(sw_prod);
2230 if (rx_pkt == budget)
2233 /* Refresh hw_cons to see if there is new work */
2234 if (sw_cons == hw_cons) {
2235 hw_cons = bp->hw_rx_cons =
2236 sblk->status_rx_quick_consumer_index0;
2237 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2242 bp->rx_cons = sw_cons;
2243 bp->rx_prod = sw_prod;
2245 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2247 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2255 /* MSI ISR - The only difference between this and the INTx ISR
2256 * is that the MSI interrupt is always serviced.
2259 bnx2_msi(int irq, void *dev_instance)
2261 struct net_device *dev = dev_instance;
2262 struct bnx2 *bp = netdev_priv(dev);
2264 prefetch(bp->status_blk);
2265 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2266 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2267 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2269 /* Return here if interrupt is disabled. */
2270 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2273 netif_rx_schedule(dev);
2279 bnx2_msi_1shot(int irq, void *dev_instance)
2281 struct net_device *dev = dev_instance;
2282 struct bnx2 *bp = netdev_priv(dev);
2284 prefetch(bp->status_blk);
2286 /* Return here if interrupt is disabled. */
2287 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2290 netif_rx_schedule(dev);
2296 bnx2_interrupt(int irq, void *dev_instance)
2298 struct net_device *dev = dev_instance;
2299 struct bnx2 *bp = netdev_priv(dev);
2301 /* When using INTx, it is possible for the interrupt to arrive
2302 * at the CPU before the status block write that preceded the
2303 * interrupt is visible. Reading a register will flush the status block.
2304 * When using MSI, the MSI message will always complete after
2305 * the status block write.
2307 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2308 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2309 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2312 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2313 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2314 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2316 /* Return here if interrupt is shared and is disabled. */
2317 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2320 netif_rx_schedule(dev);
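/* NAPI flow shared by the ISRs above (sketch): the INTx and plain MSI
 * handlers mask further interrupts through INT_ACK_CMD (the one-shot
 * MSI variant relies on the hardware's one-shot behavior instead),
 * bail out if intr_sem shows interrupts are logically disabled, and
 * otherwise schedule bnx2_poll(), which does the rx/tx work and
 * re-enables interrupts via INT_ACK_CMD once no work remains.
 */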
2325 #define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE
2328 bnx2_has_work(struct bnx2 *bp)
2330 struct status_block *sblk = bp->status_blk;
2332 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2333 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2336 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2337 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2344 bnx2_poll(struct net_device *dev, int *budget)
2346 struct bnx2 *bp = netdev_priv(dev);
2347 struct status_block *sblk = bp->status_blk;
2348 u32 status_attn_bits = sblk->status_attn_bits;
2349 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2351 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2352 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2356 /* This is needed to take care of transient status
2357 * during link changes.
2359 REG_WR(bp, BNX2_HC_COMMAND,
2360 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2361 REG_RD(bp, BNX2_HC_COMMAND);
2364 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2367 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2368 int orig_budget = *budget;
2371 if (orig_budget > dev->quota)
2372 orig_budget = dev->quota;
2374 work_done = bnx2_rx_int(bp, orig_budget);
2375 *budget -= work_done;
2376 dev->quota -= work_done;
2379 bp->last_status_idx = bp->status_blk->status_idx;
2382 if (!bnx2_has_work(bp)) {
2383 netif_rx_complete(dev);
2384 if (likely(bp->flags & USING_MSI_FLAG)) {
2385 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2386 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2387 bp->last_status_idx);
2390 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2391 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2392 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2393 bp->last_status_idx);
2395 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2396 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2397 bp->last_status_idx);
2404 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2405 * from set_multicast.
2408 bnx2_set_rx_mode(struct net_device *dev)
2410 struct bnx2 *bp = netdev_priv(dev);
2411 u32 rx_mode, sort_mode;
2414 spin_lock_bh(&bp->phy_lock);
2416 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2417 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2418 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2420 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2421 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2423 if (!(bp->flags & ASF_ENABLE_FLAG))
2424 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2426 if (dev->flags & IFF_PROMISC) {
2427 /* Promiscuous mode. */
2428 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2429 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2430 BNX2_RPM_SORT_USER0_PROM_VLAN;
2432 else if (dev->flags & IFF_ALLMULTI) {
2433 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2434 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2437 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2440 /* Accept one or more multicast(s). */
2441 struct dev_mc_list *mclist;
2442 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2447 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2449 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2450 i++, mclist = mclist->next) {
2452 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2454 regidx = (bit & 0xe0) >> 5;
2456 mc_filter[regidx] |= (1 << bit);
2459 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2460 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2464 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
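/* Hash scheme (sketch; the intermediate "bit" extraction lines are
 * elided above): the low byte of the little-endian CRC of the MAC
 * address selects one of 256 hash bits.  Its top three bits pick one
 * of the eight 32-bit MULTICAST_HASHx registers (regidx) and its low
 * five bits pick the bit within that register; e.g. a CRC low byte of
 * 0x47 sets bit 7 of HASH2.
 */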
2467 if (rx_mode != bp->rx_mode) {
2468 bp->rx_mode = rx_mode;
2469 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2472 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2473 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2474 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2476 spin_unlock_bh(&bp->phy_lock);
2479 #define FW_BUF_SIZE 0x8000
2482 bnx2_gunzip_init(struct bnx2 *bp)
2484 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2487 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2490 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2491 if (bp->strm->workspace == NULL)
2501 vfree(bp->gunzip_buf);
2502 bp->gunzip_buf = NULL;
2505 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2506 "uncompression.\n", bp->dev->name);
static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);
	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;
#define FNAME	0x8
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
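/* The firmware images are stored as gzip members, but zlib in the
 * kernel only inflates raw deflate streams.  bnx2_gunzip() therefore
 * skips the 10-byte fixed gzip header (plus the NUL-terminated file
 * name when the FNAME flag is set) and passes a negative window size
 * (-MAX_WBITS) to zlib_inflateInit2(), which tells zlib to expect
 * headerless deflate data.
 */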
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
}
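/* Each RV2P instruction is 64 bits wide and is staged through the
 * INSTR_HIGH/INSTR_LOW holding registers; the ADDR_CMD write with the
 * RDWR bit then commits it to instruction RAM slot (i / 8), which is
 * why the loop above advances 8 bytes per iteration.
 */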
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2611 /* Load the Text area. */
2612 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
				 &text_len);
		if (rc)
			return rc;
2627 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2628 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2632 /* Load the Data area. */
2633 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2637 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2638 REG_WR_IND(bp, offset, fw->data[j]);
2642 /* Load the SBSS area. */
2643 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2647 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2648 REG_WR_IND(bp, offset, fw->sbss[j]);
2652 /* Load the BSS area. */
2653 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2657 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2658 REG_WR_IND(bp, offset, fw->bss[j]);
2662 /* Load the Read-Only area. */
2663 offset = cpu_reg->spad_base +
2664 (fw->rodata_addr - cpu_reg->mips_view_base);
2668 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2669 REG_WR_IND(bp, offset, fw->rodata[j]);
2673 /* Clear the pre-fetch instruction. */
2674 REG_WR_IND(bp, cpu_reg->inst, 0);
2675 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2677 /* Start the CPU. */
2678 val = REG_RD_IND(bp, cpu_reg->mode);
2679 val &= ~cpu_reg->mode_value_halt;
2680 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
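/* Summary of the load sequence: halt the CPU and clear its pending
 * state, copy each firmware section into the processor's scratchpad
 * (section addresses are translated from the MIPS view by subtracting
 * mips_view_base and adding spad_base), clear any prefetched
 * instruction, point the PC at fw->start_addr, and finally clear the
 * halt bit to let the processor run.
 */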
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;
2698 /* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2713 /* Initialize the RX Processor. */
2714 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2715 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2716 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2717 cpu_reg.state = BNX2_RXP_CPU_STATE;
2718 cpu_reg.state_value_clear = 0xffffff;
2719 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2720 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2721 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2722 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2723 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2724 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2725 cpu_reg.mips_view_base = 0x8000000;
2727 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2728 fw = &bnx2_rxp_fw_09;
2730 fw = &bnx2_rxp_fw_06;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;
2736 /* Initialize the TX Processor. */
2737 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2738 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2739 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2740 cpu_reg.state = BNX2_TXP_CPU_STATE;
2741 cpu_reg.state_value_clear = 0xffffff;
2742 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2743 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2744 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2745 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2746 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2747 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2748 cpu_reg.mips_view_base = 0x8000000;
2750 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2751 fw = &bnx2_txp_fw_09;
2753 fw = &bnx2_txp_fw_06;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;
2759 /* Initialize the TX Patch-up Processor. */
2760 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2761 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2762 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2763 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2764 cpu_reg.state_value_clear = 0xffffff;
2765 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2766 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2767 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2768 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2769 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2770 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2771 cpu_reg.mips_view_base = 0x8000000;
2773 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2774 fw = &bnx2_tpat_fw_09;
2776 fw = &bnx2_tpat_fw_06;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;
2782 /* Initialize the Completion Processor. */
2783 cpu_reg.mode = BNX2_COM_CPU_MODE;
2784 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2785 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2786 cpu_reg.state = BNX2_COM_CPU_STATE;
2787 cpu_reg.state_value_clear = 0xffffff;
2788 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2789 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2790 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2791 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2792 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2793 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2794 cpu_reg.mips_view_base = 0x8000000;
2796 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2797 fw = &bnx2_com_fw_09;
2799 fw = &bnx2_com_fw_06;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;
2805 /* Initialize the Command Processor. */
2806 cpu_reg.mode = BNX2_CP_CPU_MODE;
2807 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2808 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2809 cpu_reg.state = BNX2_CP_CPU_STATE;
2810 cpu_reg.state_value_clear = 0xffffff;
2811 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2812 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2813 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2814 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2815 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2816 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2817 cpu_reg.mips_view_base = 0x8000000;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
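/* The chip contains five on-chip RISC processors (RXP, TXP, TPAT, COM
 * and CP), each loaded with chip-specific firmware: the *_fw_09 images
 * are for the 5709 family and the *_fw_06 images for the 5706/5708.
 * Note that the CP firmware is only loaded on the 5709 above.
 */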
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2842 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2843 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2844 PCI_PM_CTRL_PME_STATUS);
		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
2850 val = REG_RD(bp, BNX2_EMAC_MODE);
2851 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2852 val &= ~BNX2_EMAC_MODE_MPKT;
2853 REG_WR(bp, BNX2_EMAC_MODE, val);
2855 val = REG_RD(bp, BNX2_RPM_CONFIG);
2856 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2857 REG_WR(bp, BNX2_RPM_CONFIG, val);
2868 autoneg = bp->autoneg;
2869 advertising = bp->advertising;
2871 bp->autoneg = AUTONEG_SPEED;
2872 bp->advertising = ADVERTISED_10baseT_Half |
2873 ADVERTISED_10baseT_Full |
2874 ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full |
			ADVERTISED_Autoneg;

		bnx2_setup_copper_phy(bp);
2880 bp->autoneg = autoneg;
2881 bp->advertising = advertising;
2883 bnx2_set_mac_addr(bp);
2885 val = REG_RD(bp, BNX2_EMAC_MODE);
2887 /* Enable port mode. */
2888 val &= ~BNX2_EMAC_MODE_PORT;
2889 val |= BNX2_EMAC_MODE_PORT_MII |
2890 BNX2_EMAC_MODE_MPKT_RCVD |
2891 BNX2_EMAC_MODE_ACPI_RCVD |
2892 BNX2_EMAC_MODE_MPKT;
2894 REG_WR(bp, BNX2_EMAC_MODE, val);
2896 /* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
2901 REG_WR(bp, BNX2_EMAC_RX_MODE,
2902 BNX2_EMAC_RX_MODE_SORT_MODE);
2904 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2905 BNX2_RPM_SORT_USER0_MC_EN;
2906 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2907 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2908 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2909 BNX2_RPM_SORT_USER0_ENA);
2911 /* Need to enable EMAC and RPM for WOL. */
2912 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2913 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2914 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2915 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2917 val = REG_RD(bp, BNX2_RPM_CONFIG);
2918 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2919 REG_WR(bp, BNX2_RPM_CONFIG, val);
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}
2927 if (!(bp->flags & NO_WOL_FLAG))
2928 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2930 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2931 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2932 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2941 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2943 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;
2964 /* Request access to the flash interface. */
2965 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2966 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2967 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2968 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2974 if (j >= NVRAM_TIMEOUT_COUNT)
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;
2986 /* Relinquish nvram interface. */
2987 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2989 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2990 val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;
3009 val = REG_RD(bp, BNX2_MISC_CFG);
3010 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
	if (!bp->flash_info->buffered) {
		int j;
3015 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3016 REG_WR(bp, BNX2_NVM_COMMAND,
3017 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3019 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3022 val = REG_RD(bp, BNX2_NVM_COMMAND);
3023 if (val & BNX2_NVM_COMMAND_DONE)
3027 if (j >= NVRAM_TIMEOUT_COUNT)
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;
3038 val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}
static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;
3048 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3049 /* Enable both bits, even on read. */
3050 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}
static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;
3059 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3060 /* Disable both bits, even after read. */
3061 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3062 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		     BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;
3072 if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;
3076 /* Build an erase command */
3077 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3078 BNX2_NVM_COMMAND_DOIT;
3080 /* Need to clear DONE bit separately. */
3081 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3083 /* Address of the NVRAM to read from. */
3084 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3086 /* Issue an erase command. */
3087 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3089 /* Wait for completion. */
3090 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3095 val = REG_RD(bp, BNX2_NVM_COMMAND);
3096 if (val & BNX2_NVM_COMMAND_DONE)
3100 if (j >= NVRAM_TIMEOUT_COUNT)
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;
3112 /* Build the command word. */
3113 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3115 /* Calculate an offset of a buffered flash. */
3116 if (bp->flash_info->buffered) {
3117 offset = ((offset / bp->flash_info->page_size) <<
3118 bp->flash_info->page_bits) +
3119 (offset % bp->flash_info->page_size);
3122 /* Need to clear DONE bit separately. */
3123 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3125 /* Address of the NVRAM to read from. */
3126 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3128 /* Issue a read command. */
3129 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3131 /* Wait for completion. */
3132 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3137 val = REG_RD(bp, BNX2_NVM_COMMAND);
3138 if (val & BNX2_NVM_COMMAND_DONE) {
3139 val = REG_RD(bp, BNX2_NVM_READ);
3141 val = be32_to_cpu(val);
3142 memcpy(ret_val, &val, 4);
3146 if (j >= NVRAM_TIMEOUT_COUNT)
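/* Buffered flash parts are addressed by (page, byte-in-page) rather
 * than by a flat offset, hence the translation above.  Worked example
 * with the Saifun parts in flash_table (page_size = 264, page_bits =
 * 9): a linear offset of 1000 becomes (1000 / 264) << 9 + (1000 % 264)
 * = (3 << 9) + 208 = 1744.
 */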
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;
3159 /* Build the command word. */
3160 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3162 /* Calculate an offset of a buffered flash. */
3163 if (bp->flash_info->buffered) {
3164 offset = ((offset / bp->flash_info->page_size) <<
3165 bp->flash_info->page_bits) +
3166 (offset % bp->flash_info->page_size);
3169 /* Need to clear DONE bit separately. */
3170 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3172 memcpy(&val32, val, 4);
3173 val32 = cpu_to_be32(val32);
3175 /* Write the data. */
3176 REG_WR(bp, BNX2_NVM_WRITE, val32);
3178 /* Address of the NVRAM to write to. */
3179 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3181 /* Issue the write command. */
3182 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3184 /* Wait for completion. */
3185 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;
3204 /* Determine the selected interface. */
3205 val = REG_RD(bp, BNX2_NVM_CFG1);
3207 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3210 if (val & 0x40000000) {
3212 /* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
3215 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3216 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
3224 /* Not yet been reconfigured */
3226 if (val & (1 << 23))
3227 mask = FLASH_BACKUP_STRAP_MASK;
3229 mask = FLASH_STRAP_MASK;
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
3234 if ((val & mask) == (flash->strapping & mask)) {
3235 bp->flash_info = flash;
3237 /* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;
3241 /* Enable access to flash interface */
3242 bnx2_enable_nvram_access(bp);
3244 /* Reconfigure the flash interface */
3245 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3246 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3247 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3248 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3250 /* Disable access to flash interface */
3251 bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);
				break;
			}
		}
	} /* if (val & 0x40000000) */
3259 if (j == entry_count) {
3260 bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}
3265 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3266 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
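/* Flash detection logic: bit 30 of NVM_CFG1 indicates that the
 * bootcode has already reconfigured the interface, in which case only
 * the reconfigurable strap bits are compared.  Otherwise the raw
 * strapping pins (with bit 23 selecting the backup strap set) are
 * matched against flash_table, and the matching entry's timing and
 * configuration values are programmed into NVM_CFG1..3.
 */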
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;
3285 /* Request access to the flash interface. */
3286 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3289 /* Enable access to flash interface */
3290 bnx2_enable_nvram_access(bp);
3303 pre_len = 4 - (offset & 3);
3305 if (pre_len >= len32) {
3307 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3308 BNX2_NVM_COMMAND_LAST;
3311 cmd_flags = BNX2_NVM_COMMAND_FIRST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);
3326 extra = 4 - (len32 & 3);
3327 len32 = (len32 + 4) & ~3;
3334 cmd_flags = BNX2_NVM_COMMAND_LAST;
3336 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3337 BNX2_NVM_COMMAND_LAST;
3339 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3341 memcpy(ret_buf, buf, 4 - extra);
3343 else if (len32 > 0) {
3346 /* Read the first word. */
3350 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3352 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3354 /* Advance to the next dword. */
3359 while (len32 > 4 && rc == 0) {
3360 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3362 /* Advance to the next dword. */
3371 cmd_flags = BNX2_NVM_COMMAND_LAST;
3372 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3374 memcpy(ret_buf, buf, 4 - extra);
3377 /* Disable access to flash interface */
3378 bnx2_disable_nvram_access(bp);
	bnx2_release_nvram_lock(bp);

	return rc;
}
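/* NVRAM can only be read as aligned 32-bit words framed by the FIRST
 * and LAST command flags, so the code above bounces unaligned edges
 * through a small 4-byte buffer: pre_len covers a start offset that is
 * not a multiple of 4, and extra covers a tail shorter than one dword,
 * with only the requested bytes copied out to ret_buf.
 */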
3386 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3389 u32 written, offset32, len32;
3390 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3392 int align_start, align_end;
3397 align_start = align_end = 0;
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}
3415 if (align_start || align_end) {
3416 align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start)
			memcpy(align_buf, start, 4);

		if (align_end)
			memcpy(align_buf + len32 - 4, end, 4);

		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}
3429 if (bp->flash_info->buffered == 0) {
3430 flash_buffer = kmalloc(264, GFP_KERNEL);
3431 if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
3438 while ((written < len32) && (rc == 0)) {
3439 u32 page_start, page_end, data_start, data_end;
3440 u32 addr, cmd_flags;
3443 /* Find the page_start addr */
3444 page_start = offset32 + written;
3445 page_start -= (page_start % bp->flash_info->page_size);
3446 /* Find the page_end addr */
3447 page_end = page_start + bp->flash_info->page_size;
3448 /* Find the data_start addr */
3449 data_start = (written == 0) ? offset32 : page_start;
3450 /* Find the data_end addr */
3451 data_end = (page_end > offset32 + len32) ?
3452 (offset32 + len32) : page_end;
3454 /* Request access to the flash interface. */
3455 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3456 goto nvram_write_end;
3458 /* Enable access to flash interface */
3459 bnx2_enable_nvram_access(bp);
3461 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3462 if (bp->flash_info->buffered == 0) {
3465 /* Read the whole page into the buffer
3466 * (non-buffer flash only) */
3467 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3468 if (j == (bp->flash_info->page_size - 4)) {
3469 cmd_flags |= BNX2_NVM_COMMAND_LAST;
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}
3483 /* Enable writes to flash interface (unlock write-protect) */
3484 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3485 goto nvram_write_end;
		/* Loop to write back the buffer data from page_start to
		 * data_start.
		 */
3490 if (bp->flash_info->buffered == 0) {
3491 /* Erase the page */
3492 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3493 goto nvram_write_end;
3495 /* Re-enable the write again for the actual write */
3496 bnx2_enable_nvram_write(bp);
3498 for (addr = page_start; addr < data_start;
3499 addr += 4, i += 4) {
3501 rc = bnx2_nvram_write_dword(bp, addr,
3502 &flash_buffer[i], cmd_flags);
				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}
3511 /* Loop to write the new data from data_start to data_end */
3512 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3513 if ((addr == page_end - 4) ||
3514 ((bp->flash_info->buffered) &&
3515 (addr == data_end - 4))) {
				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}
		/* Loop to write back the buffer data from data_end
		 * to page_end.
		 */
3531 if (bp->flash_info->buffered == 0) {
3532 for (addr = data_end; addr < page_end;
3533 addr += 4, i += 4) {
3535 if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
3538 rc = bnx2_nvram_write_dword(bp, addr,
3539 &flash_buffer[i], cmd_flags);
				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}
3548 /* Disable writes to flash interface (lock write-protect) */
3549 bnx2_disable_nvram_write(bp);
3551 /* Disable access to flash interface */
3552 bnx2_disable_nvram_access(bp);
3553 bnx2_release_nvram_lock(bp);
3555 /* Increment written */
3556 written += data_end - data_start;
nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
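/* For non-buffered (raw SPI) flash the smallest erasable unit is a
 * page, so a partial-page update is a read-modify-write cycle: the
 * whole page is read into flash_buffer, the page is erased, and the
 * untouched leading and trailing dwords are written back around the
 * new data.  Buffered parts handle the erase internally, so only the
 * new data range is written.
 */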
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
3571 /* Wait for the current PCI transaction to complete before
3572 * issuing a reset. */
3573 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3574 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3575 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3576 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3577 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);
3581 /* Wait for the firmware to tell us it is ok to issue a reset. */
3582 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3584 /* Deposit a driver reset signature so the firmware knows that
3585 * this is a soft reset. */
3586 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3587 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3589 /* Do a dummy read to force the chip to complete all current transaction
3590 * before we issue a reset. */
3591 val = REG_RD(bp, BNX2_MISC_ID);
3593 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3594 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);
3598 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3599 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3605 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3606 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3609 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3611 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3612 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3613 current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ / 50);
		}
		/* Reset takes approximately 30 usec */
3618 for (i = 0; i < 10; i++) {
3619 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3620 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3621 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3626 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3627 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3628 printk(KERN_ERR PFX "Chip reset did not complete\n");
3633 /* Make sure byte swapping is properly configured. */
3634 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3635 if (val != 0x01020304) {
3636 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3640 /* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;
3645 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator to two steps lower.  The
		 * default value of this register is 0x0000000e. */
3648 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3650 /* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;
3663 /* Make sure the interrupt is not active. */
3664 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3666 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3667 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
#ifdef __LITTLE_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
#endif
3672 DMA_READ_CHANS << 12 |
3673 DMA_WRITE_CHANS << 16;
3675 val |= (0x2 << 20) | (1 << 11);
3677 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3680 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3681 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3682 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3684 REG_WR(bp, BNX2_DMA_CONFIG, val);
3686 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3687 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3688 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3689 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3692 if (bp->flags & PCIX_FLAG) {
3695 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3697 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3698 val16 & ~PCI_X_CMD_ERO);
3701 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3702 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3703 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3704 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3706 /* Initialize context mapping and zero out the quick contexts. The
3707 * context block must have already been enabled. */
3708 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3709 rc = bnx2_init_5709_context(bp);
3713 bnx2_init_context(bp);
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;
3718 bnx2_init_nvram(bp);
3720 bnx2_set_mac_addr(bp);
3722 val = REG_RD(bp, BNX2_MQ_CONFIG);
3723 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3724 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3725 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3726 val |= BNX2_MQ_CONFIG_HALT_DIS;
3728 REG_WR(bp, BNX2_MQ_CONFIG, val);
3730 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3731 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3732 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3734 val = (BCM_PAGE_BITS - 8) << 24;
3735 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3737 /* Configure page size. */
3738 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3739 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3740 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3741 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3743 val = bp->mac_addr[0] +
3744 (bp->mac_addr[1] << 8) +
3745 (bp->mac_addr[2] << 16) +
3747 (bp->mac_addr[4] << 8) +
3748 (bp->mac_addr[5] << 16);
3749 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3751 /* Program the MTU. Also include 4 bytes for CRC32. */
3752 val = bp->dev->mtu + ETH_HLEN + 4;
3753 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3754 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3755 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3757 bp->last_status_idx = 0;
3758 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3760 /* Set up how to generate a link change interrupt. */
3761 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3763 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3764 (u64) bp->status_blk_mapping & 0xffffffff);
3765 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3767 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3768 (u64) bp->stats_blk_mapping & 0xffffffff);
3769 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3770 (u64) bp->stats_blk_mapping >> 32);
3772 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3773 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3775 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3776 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3778 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3779 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3781 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3783 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3785 REG_WR(bp, BNX2_HC_COM_TICKS,
3786 (bp->com_ticks_int << 16) | bp->com_ticks);
3788 REG_WR(bp, BNX2_HC_CMD_TICKS,
3789 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3791 if (CHIP_NUM(bp) == CHIP_NUM_5708)
3792 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3795 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3797 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3798 val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}
3804 if (bp->flags & ONE_SHOT_MSI_FLAG)
3805 val |= BNX2_HC_CONFIG_ONE_SHOT;
3807 REG_WR(bp, BNX2_HC_CONFIG, val);
3809 /* Clear internal stats counters. */
3810 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3812 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
3814 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3815 BNX2_PORT_FEATURE_ASF_ENABLED)
3816 bp->flags |= ASF_ENABLE_FLAG;
3818 /* Initialize the receive filter. */
3819 bnx2_set_rx_mode(bp->dev);
3821 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3822 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
3823 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);
3829 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3830 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
3842 u32 val, offset0, offset1, offset2, offset3;
3844 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3845 offset0 = BNX2_L2CTX_TYPE_XI;
3846 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3847 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3848 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
3851 offset1 = BNX2_L2CTX_CMD_TYPE;
3852 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
3855 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3856 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3858 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3859 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3861 val = (u64) bp->tx_desc_mapping >> 32;
3862 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3864 val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;
3876 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3878 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3879 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	cid = TX_CID;
3887 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3888 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
	bnx2_init_tx_context(bp, cid);
}
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;
3901 /* 8 for CRC and VLAN */
3902 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3904 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3906 ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;
3911 for (i = 0; i < bp->rx_max_ring; i++) {
3914 rxbd = &bp->rx_desc_ring[i][0];
3915 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3916 rxbd->rx_bd_len = bp->rx_buf_use_size;
3917 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}
3928 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3929 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3931 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3933 val = (u64) bp->rx_desc_mapping[0] >> 32;
3934 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3936 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3937 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3939 for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;
3948 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 num_rings, max;

	bp->rx_ring_size = size;
	num_rings = 1;
3960 while (size > MAX_RX_DESC_CNT) {
		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = MAX_RX_RINGS;
	while ((max & num_rings) == 0)
		max >>= 1;
	if (num_rings != max)
		max <<= 1;
3972 bp->rx_max_ring = max;
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
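/* Worked example of the sizing above, using the values from bnx2.h
 * (MAX_RX_DESC_CNT = 255, MAX_RX_RINGS = 4): a requested size of 600
 * needs num_rings = 3 (255 + 255 + 90), and rounding 3 up to the next
 * power of 2 gives bp->rx_max_ring = 4 ring pages.
 */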
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;
3984 for (i = 0; i < TX_DESC_CNT; ) {
3985 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3995 skb_headlen(skb), PCI_DMA_TODEVICE);
		tx_buf->skb = NULL;

		last = skb_shinfo(skb)->nr_frags;
4000 for (j = 0; j < last; j++) {
4001 tx_buf = &bp->tx_buf_ring[i + j + 1];
4002 pci_unmap_page(bp->pdev,
4003 pci_unmap_addr(tx_buf, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
		}

		dev_kfree_skb(skb);
		i += j + 1;
	}
}
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;
4021 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4022 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;
		dev_kfree_skb(skb);
	}
}
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;
	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;
4057 bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;
4070 spin_lock_bh(&bp->phy_lock);
4072 spin_unlock_bh(&bp->phy_lock);
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
4089 { 0x006c, 0, 0x00000000, 0x0000003f },
4090 { 0x0090, 0, 0xffffffff, 0x00000000 },
4091 { 0x0094, 0, 0x00000000, 0x00000000 },
4093 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4094 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4095 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4096 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4097 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4098 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4099 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4100 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4101 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4103 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4104 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4105 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4106 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4107 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4108 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4110 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4111 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4112 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4114 { 0x1000, 0, 0x00000000, 0x00000001 },
4115 { 0x1004, 0, 0x00000000, 0x000f0001 },
4117 { 0x1408, 0, 0x01c00800, 0x00000000 },
4118 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4119 { 0x14a8, 0, 0x00000000, 0x000001ff },
4120 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4121 { 0x14b0, 0, 0x00000002, 0x00000001 },
4122 { 0x14b8, 0, 0x00000000, 0x00000000 },
4123 { 0x14c0, 0, 0x00000000, 0x00000009 },
4124 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4125 { 0x14cc, 0, 0x00000000, 0x00000001 },
4126 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4128 { 0x1800, 0, 0x00000000, 0x00000001 },
4129 { 0x1804, 0, 0x00000000, 0x00000003 },
4131 { 0x2800, 0, 0x00000000, 0x00000001 },
4132 { 0x2804, 0, 0x00000000, 0x00003f01 },
4133 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4134 { 0x2810, 0, 0xffff0000, 0x00000000 },
4135 { 0x2814, 0, 0xffff0000, 0x00000000 },
4136 { 0x2818, 0, 0xffff0000, 0x00000000 },
4137 { 0x281c, 0, 0xffff0000, 0x00000000 },
4138 { 0x2834, 0, 0xffffffff, 0x00000000 },
4139 { 0x2840, 0, 0x00000000, 0xffffffff },
4140 { 0x2844, 0, 0x00000000, 0xffffffff },
4141 { 0x2848, 0, 0xffffffff, 0x00000000 },
4142 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4144 { 0x2c00, 0, 0x00000000, 0x00000011 },
4145 { 0x2c04, 0, 0x00000000, 0x00030007 },
4147 { 0x3c00, 0, 0x00000000, 0x00000001 },
4148 { 0x3c04, 0, 0x00000000, 0x00070000 },
4149 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4150 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4151 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4152 { 0x3c14, 0, 0x00000000, 0xffffffff },
4153 { 0x3c18, 0, 0x00000000, 0xffffffff },
4154 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4155 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4157 { 0x5004, 0, 0x00000000, 0x0000007f },
4158 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4160 { 0x5c00, 0, 0x00000000, 0x00000001 },
4161 { 0x5c04, 0, 0x00000000, 0x0003000f },
4162 { 0x5c08, 0, 0x00000003, 0x00000000 },
4163 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4164 { 0x5c10, 0, 0x00000000, 0xffffffff },
4165 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4166 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4167 { 0x5c88, 0, 0x00000000, 0x00077373 },
4168 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4170 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4171 { 0x680c, 0, 0xffffffff, 0x00000000 },
4172 { 0x6810, 0, 0xffffffff, 0x00000000 },
4173 { 0x6814, 0, 0xffffffff, 0x00000000 },
4174 { 0x6818, 0, 0xffffffff, 0x00000000 },
4175 { 0x681c, 0, 0xffffffff, 0x00000000 },
4176 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4177 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4178 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4179 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4180 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4181 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4182 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4183 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4184 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4185 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4186 { 0x684c, 0, 0xffffffff, 0x00000000 },
4187 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4188 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4189 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4190 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4191 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4192 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4194 { 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;
4202 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4203 u32 offset, rw_mask, ro_mask, save_val, val;
4204 u16 flags = reg_tbl[i].flags;
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;
4209 offset = (u32) reg_tbl[i].offset;
4210 rw_mask = reg_tbl[i].rw_mask;
4211 ro_mask = reg_tbl[i].ro_mask;
4213 save_val = readl(bp->regview + offset);
4215 writel(0, bp->regview + offset);
4217 val = readl(bp->regview + offset);
4218 if ((val & rw_mask) != 0) {
4222 if ((val & ro_mask) != (save_val & ro_mask)) {
4226 writel(0xffffffff, bp->regview + offset);
4228 val = readl(bp->regview + offset);
4229 if ((val & rw_mask) != rw_mask) {
4233 if ((val & ro_mask) != (save_val & ro_mask)) {
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}

	return ret;
}
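/* The register test encodes the expected behavior of each offset in
 * two masks: rw_mask bits must read back exactly what was written,
 * while ro_mask bits must keep their original value no matter what is
 * written.  For example, { 0x14b0, 0, 0x00000002, 0x00000001 } says
 * that bit 1 is read-write and bit 0 is read-only at offset 0x14b0.
 */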
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
4251 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4252 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4255 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {
4260 REG_WR_IND(bp, start + offset, test_pattern[i]);
			if (REG_RD_IND(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}
static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		long offset;
		long len;
	} mem_tbl_5706[] = {
4280 { 0x60000, 0x4000 },
4281 { 0xa0000, 0x3000 },
4282 { 0xe0000, 0x4000 },
4283 { 0x120000, 0x4000 },
4284 { 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	},
	mem_tbl_5709[] = {
4289 { 0x60000, 0x4000 },
4290 { 0xa0000, 0x3000 },
4291 { 0xe0000, 0x4000 },
4292 { 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
	};
4296 struct mem_entry *mem_tbl;
4298 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4299 mem_tbl = mem_tbl_5709;
4301 mem_tbl = mem_tbl_5706;
4303 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4304 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
				    mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
4313 #define BNX2_MAC_LOOPBACK 0
4314 #define BNX2_PHY_LOOPBACK 1
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
4319 unsigned int pkt_size, num_pkts, i;
4320 struct sk_buff *skb, *rx_skb;
4321 unsigned char *packet;
4322 u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
4329 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4330 bp->loopback = MAC_LOOPBACK;
4331 bnx2_set_mac_loopback(bp);
4333 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4334 bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
4345 memcpy(packet, bp->dev->dev_addr, 6);
4346 memset(packet + 6, 0x0, 8);
4347 for (i = 14; i < pkt_size; i++)
4348 packet[i] = (unsigned char) (i & 0xff);
	map = pci_map_single(bp->pdev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
4353 REG_WR(bp, BNX2_HC_COMMAND,
4354 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4356 REG_RD(bp, BNX2_HC_COMMAND);
	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;
4363 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4365 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4366 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4367 txbd->tx_bd_mss_nbytes = pkt_size;
4368 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4372 bp->tx_prod_bseq += pkt_size;
4374 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4375 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4379 REG_WR(bp, BNX2_HC_COMMAND,
4380 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4382 REG_RD(bp, BNX2_HC_COMMAND);
	msleep(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
4389 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}
4393 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4394 if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}
4398 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4399 rx_skb = rx_buf->skb;
4401 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4402 skb_reserve(rx_skb, bp->rx_offset);
4404 pci_dma_sync_single_for_cpu(bp->pdev,
4405 pci_unmap_addr(rx_buf, mapping),
4406 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4408 if (rx_hdr->l2_fhdr_status &
4409 (L2_FHDR_ERRORS_BAD_CRC |
4410 L2_FHDR_ERRORS_PHY_DECODE |
4411 L2_FHDR_ERRORS_ALIGNMENT |
4412 L2_FHDR_ERRORS_TOO_SHORT |
4413 L2_FHDR_ERRORS_GIANT_FRAME)) {
		goto loopback_test_done;
	}
4418 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}
4422 for (i = 14; i < pkt_size; i++) {
4423 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4435 #define BNX2_MAC_LOOPBACK_FAILED 1
4436 #define BNX2_PHY_LOOPBACK_FAILED 2
4437 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4438 BNX2_PHY_LOOPBACK_FAILED)
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
4459 #define NVRAM_SIZE 0x200
4460 #define CRC32_RESIDUAL 0xdebb20e3
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;
4470 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4471 goto test_nvram_done;
4473 magic = be32_to_cpu(buf[0]);
4474 if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}
4479 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4480 goto test_nvram_done;
4482 csum = ether_crc_le(0x100, data);
4483 if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}
4488 csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
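/* The checksum test relies on the CRC-32 residue property: running
 * ether_crc_le() over a block that ends with its own little-endian
 * CRC yields the constant CRC32_RESIDUAL (0xdebb20e3) regardless of
 * the data, so each 0x100-byte NVRAM half can be validated without
 * parsing out the stored checksum.
 */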
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
4503 bnx2_enable_bmsr1(bp);
4504 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4505 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4506 bnx2_disable_bmsr1(bp);
4507 spin_unlock_bh(&bp->phy_lock);
4509 if (bmsr & BMSR_LSTATUS) {
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4526 /* This register is not touched during run-time. */
4527 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4528 REG_RD(bp, BNX2_HC_COMMAND);
4530 for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx)
			break;

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
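/* Some chipsets advertise MSI but never deliver the message.  The
 * test above forces an immediate interrupt via HC_COMMAND_COAL_NOW and
 * then polls the status index visible through INT_ACK_CMD; if it never
 * changes, bnx2_open() falls back from MSI to INTx.
 */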
4546 bnx2_5706_serdes_timer(struct bnx2 *bp)
4548 spin_lock(&bp->phy_lock);
4549 if (bp->serdes_an_pending)
4550 bp->serdes_an_pending--;
4551 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;
4556 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4558 if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			bnx2_write_phy(bp, 0x1c, 0x7c00);
4562 bnx2_read_phy(bp, 0x1c, &phy1);
4564 bnx2_write_phy(bp, 0x17, 0x0f01);
4565 bnx2_read_phy(bp, 0x15, &phy2);
4566 bnx2_write_phy(bp, 0x17, 0x0f01);
4567 bnx2_read_phy(bp, 0x15, &phy2);
4569 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4570 !(phy2 & 0x20)) { /* no CONFIG */
4572 bmcr &= ~BMCR_ANENABLE;
4573 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4574 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
4579 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4580 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4583 bnx2_write_phy(bp, 0x17, 0x0f01);
4584 bnx2_read_phy(bp, 0x15, &phy2);
4588 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4589 bmcr |= BMCR_ANENABLE;
4590 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4592 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4595 bp->current_interval = bp->timer_interval;
4597 spin_unlock(&bp->phy_lock);
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}
4608 spin_lock(&bp->phy_lock);
4609 if (bp->serdes_an_pending)
4610 bp->serdes_an_pending--;
4611 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4614 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4615 if (bmcr & BMCR_ANENABLE) {
4616 bnx2_enable_forced_2g5(bp);
4617 bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
4620 bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
static void
bnx2_timer(unsigned long data)
{
	u32 msg;
	struct bnx2 *bp = (struct bnx2 *) data;
	if (!netif_running(bp->dev))
		return;
4639 if (atomic_read(&bp->intr_sem) != 0)
4640 goto bnx2_restart_timer;
4642 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4643 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4645 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4647 /* workaround occasional corrupted counters */
4648 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4649 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4650 BNX2_HC_COMMAND_STATS_NOW);
4652 if (bp->phy_flags & PHY_SERDES_FLAG) {
4653 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4654 bnx2_5706_serdes_timer(bp);
4656 bnx2_5708_serdes_timer(bp);
bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
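/* The BNX2_DRV_PULSE_MB write above is a driver heartbeat: the
 * bootcode watches the incrementing sequence number, and if the pulses
 * stop (driver hung or unloaded uncleanly) the firmware can reclaim
 * the device, e.g. to keep management traffic alive.
 */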
static int
bnx2_request_irq(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	int rc;

	if (bp->flags & USING_MSI_FLAG) {
4670 irq_handler_t fn = bnx2_msi;
4672 if (bp->flags & ONE_SHOT_MSI_FLAG)
4673 fn = bnx2_msi_1shot;
		rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
	} else
		rc = request_irq(bp->pdev->irq, bnx2_interrupt,
				 IRQF_SHARED, dev->name, dev);
	return rc;
}
static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
4687 if (bp->flags & USING_MSI_FLAG) {
4688 free_irq(bp->pdev->irq, dev);
4689 pci_disable_msi(bp->pdev);
4690 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4692 free_irq(bp->pdev->irq, dev);
4695 /* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;
4702 netif_carrier_off(dev);
4704 bnx2_set_power_state(bp, PCI_D0);
4705 bnx2_disable_int(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;
4711 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
4712 if (pci_enable_msi(bp->pdev) == 0) {
4713 bp->flags |= USING_MSI_FLAG;
4714 if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}
	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_free_irq(bp);
		bnx2_free_mem(bp);
		return rc;
	}
4734 mod_timer(&bp->timer, jiffies + bp->current_interval);
4736 atomic_set(&bp->intr_sem, 0);
4738 bnx2_enable_int(bp);
4740 if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
4744 if (bnx2_test_intr(bp) != 0) {
4745 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4746 " using MSI, switching to INTx mode. Please"
4747 " report this failure to the PCI maintainer"
4748 " and include system chipset information.\n",
			bnx2_disable_int(bp);
			bnx2_free_irq(bp);
4754 rc = bnx2_init_nic(bp);
			if (!rc)
				rc = bnx2_request_irq(bp);
4762 del_timer_sync(&bp->timer);
4765 bnx2_enable_int(bp);
4768 if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}
	netif_start_queue(dev);

	return 0;
}
static void
bnx2_reset_task(struct work_struct *work)
4780 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	if (!netif_running(bp->dev))
		return;
4785 bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
4791 bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
static void
bnx2_tx_timeout(struct net_device *dev)
4798 struct bnx2 *bp = netdev_priv(dev);
4800 /* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4805 /* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4809 struct bnx2 *bp = netdev_priv(dev);
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4820 /* Called with netif_tx_lock.
4821 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
4827 struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
4831 u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
4835 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4836 netif_stop_queue(dev);
4837 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
4847 if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}
4851 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
4855 if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;
4859 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4861 tcp_opt_len = tcp_optlen(skb);
4863 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4864 u32 tcp_off = skb_transport_offset(skb) -
4865 sizeof(struct ipv6hdr) - ETH_HLEN;
4867 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4868 TX_BD_FLAGS_SW_FLAGS;
4869 if (likely(tcp_off == 0))
4870 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4873 vlan_tag_flags |= ((tcp_off & 0x3) <<
4874 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4875 ((tcp_off & 0x10) <<
4876 TX_BD_FLAGS_TCP6_OFF4_SHL);
4877 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4880 if (skb_header_cloned(skb) &&
4881 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
4886 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
4895 if (tcp_opt_len || (iph->ihl > 5)) {
4896 vlan_tag_flags |= ((iph->ihl - 5) +
4897 (tcp_opt_len >> 2)) << 8;
4903 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4905 tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);
4909 txbd = &bp->tx_desc_ring[ring_prod];
4911 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4912 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4913 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4914 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4916 last_frag = skb_shinfo(skb)->nr_frags;
4918 for (i = 0; i < last_frag; i++) {
4919 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4921 prod = NEXT_TX_BD(prod);
4922 ring_prod = TX_RING_IDX(prod);
4923 txbd = &bp->tx_desc_ring[ring_prod];
		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4927 len, PCI_DMA_TODEVICE);
4928 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4931 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4932 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4933 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4934 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4939 prod = NEXT_TX_BD(prod);
4940 bp->tx_prod_bseq += skb->len;
4942 REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;
4950 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4951 netif_stop_queue(dev);
4952 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}
	return NETDEV_TX_OK;
}
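/* Queue flow control in bnx2_start_xmit(): the queue is stopped when
 * fewer than MAX_SKB_FRAGS + 1 descriptors remain, i.e. when the next
 * worst-case packet might not fit.  Availability is re-checked against
 * tx_wake_thresh (half the ring) after stopping because bnx2_tx_int()
 * may have freed descriptors in between, which would otherwise leave
 * the queue stopped forever.
 */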
4959 /* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;
4966 /* Calling flush_scheduled_work() may deadlock because
4967 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);
4973 bnx2_netif_stop(bp);
4974 del_timer_sync(&bp->timer);
4975 if (bp->flags & NO_WOL_FLAG)
4976 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
4986 netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4991 #define GET_NET_STATS64(ctr) \
4992 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4993 (unsigned long) (ctr##_lo)
#define GET_NET_STATS32(ctr)				\
	(ctr##_lo)
4998 #if (BITS_PER_LONG == 64)
4999 #define GET_NET_STATS GET_NET_STATS64
#define GET_NET_STATS GET_NET_STATS32
#endif
5004 static struct net_device_stats *
5005 bnx2_get_stats(struct net_device *dev)
5007 struct bnx2 *bp = netdev_priv(dev);
5008 struct statistics_block *stats_blk = bp->stats_blk;
5009 struct net_device_stats *net_stats = &bp->net_stats;
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
5014 net_stats->rx_packets =
5015 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5016 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5017 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5019 net_stats->tx_packets =
5020 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5021 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5022 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5024 net_stats->rx_bytes =
5025 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5027 net_stats->tx_bytes =
5028 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5030 net_stats->multicast =
5031 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5033 net_stats->collisions =
5034 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5036 net_stats->rx_length_errors =
5037 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5038 stats_blk->stat_EtherStatsOverrsizePkts);
5040 net_stats->rx_over_errors =
5041 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5043 net_stats->rx_frame_errors =
5044 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5046 net_stats->rx_crc_errors =
5047 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5049 net_stats->rx_errors = net_stats->rx_length_errors +
5050 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5051 net_stats->rx_crc_errors;
5053 net_stats->tx_aborted_errors =
5054 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5055 stats_blk->stat_Dot3StatsLateCollisions);
5057 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5058 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5059 net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}
5066 net_stats->tx_errors =
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
5070 net_stats->tx_aborted_errors +
5071 net_stats->tx_carrier_errors;
5073 net_stats->rx_missed_errors =
5074 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
			 stats_blk->stat_FwRxDrop);

	return net_stats;
}
5080 /* All ethtool functions called with rtnl_lock */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
5087 cmd->supported = SUPPORTED_Autoneg;
5088 if (bp->phy_flags & PHY_SERDES_FLAG) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
5091 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5092 cmd->supported |= SUPPORTED_2500baseX_Full;
		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
5098 SUPPORTED_10baseT_Full |
5099 SUPPORTED_100baseT_Half |
5100 SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
		cmd->port = PORT_TP;
	}
5107 cmd->advertising = bp->advertising;
5109 if (bp->autoneg & AUTONEG_SPEED) {
5110 cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}
5116 if (netif_carrier_ok(dev)) {
5117 cmd->speed = bp->line_speed;
5118 cmd->duplex = bp->duplex;
5125 cmd->transceiver = XCVR_INTERNAL;
5126 cmd->phy_address = bp->phy_addr;
5132 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5134 struct bnx2 *bp = netdev_priv(dev);
5135 u8 autoneg = bp->autoneg;
5136 u8 req_duplex = bp->req_duplex;
5137 u16 req_line_speed = bp->req_line_speed;
5138 u32 advertising = bp->advertising;
5140 if (cmd->autoneg == AUTONEG_ENABLE) {
5141 autoneg |= AUTONEG_SPEED;
5143 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5145 /* allow advertising only one speed at a time */
5146 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5147 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5148 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5149 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5151 if (bp->phy_flags & PHY_SERDES_FLAG)
5154 advertising = cmd->advertising;
5156 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5157 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5159 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
5160 advertising = cmd->advertising;
5162 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5166 if (bp->phy_flags & PHY_SERDES_FLAG) {
5167 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5170 advertising = ETHTOOL_ALL_COPPER_SPEED;
5173 advertising |= ADVERTISED_Autoneg;
5176 if (bp->phy_flags & PHY_SERDES_FLAG) {
5177 if ((cmd->speed != SPEED_1000 &&
5178 cmd->speed != SPEED_2500) ||
5179 (cmd->duplex != DUPLEX_FULL))
5182 if (cmd->speed == SPEED_2500 &&
5183 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5186 else if (cmd->speed == SPEED_1000) {
5189 autoneg &= ~AUTONEG_SPEED;
5190 req_line_speed = cmd->speed;
5191 req_duplex = cmd->duplex;
5195 bp->autoneg = autoneg;
5196 bp->advertising = advertising;
5197 bp->req_line_speed = req_line_speed;
5198 bp->req_duplex = req_duplex;
5200 spin_lock_bh(&bp->phy_lock);
5204 spin_unlock_bh(&bp->phy_lock);
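/*
 * Usage sketch (assumes the standard ethtool CLI; "eth0" is a
 * placeholder):
 *	ethtool -s eth0 autoneg on advertise 0x008
 * advertises only 100baseT/Full, while with autoneg off SerDes ports
 * accept only 1000 or 2500 full duplex per the checks above.
 */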
5210 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5212 struct bnx2 *bp = netdev_priv(dev);
5214 strcpy(info->driver, DRV_MODULE_NAME);
5215 strcpy(info->version, DRV_MODULE_VERSION);
5216 strcpy(info->bus_info, pci_name(bp->pdev));
5217 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5218 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5219 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5220 info->fw_version[1] = info->fw_version[3] = '.';
5221 info->fw_version[5] = 0;
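/*
 * Example (illustration only): the bootcode revision is stored one byte
 * per field, so bp->fw_ver == 0x01020300 is rendered as the string
 * "1.2.3".  Note the + '0' conversion only handles single-digit (0-9)
 * version components.
 */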
5224 #define BNX2_REGDUMP_LEN (32 * 1024)
5227 bnx2_get_regs_len(struct net_device *dev)
5229 return BNX2_REGDUMP_LEN;
5233 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5235 u32 *p = _p, i, offset;
5237 struct bnx2 *bp = netdev_priv(dev);
5238 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5239 0x0800, 0x0880, 0x0c00, 0x0c10,
5240 0x0c30, 0x0d08, 0x1000, 0x101c,
5241 0x1040, 0x1048, 0x1080, 0x10a4,
5242 0x1400, 0x1490, 0x1498, 0x14f0,
5243 0x1500, 0x155c, 0x1580, 0x15dc,
5244 0x1600, 0x1658, 0x1680, 0x16d8,
5245 0x1800, 0x1820, 0x1840, 0x1854,
5246 0x1880, 0x1894, 0x1900, 0x1984,
5247 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5248 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5249 0x2000, 0x2030, 0x23c0, 0x2400,
5250 0x2800, 0x2820, 0x2830, 0x2850,
5251 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5252 0x3c00, 0x3c94, 0x4000, 0x4010,
5253 0x4080, 0x4090, 0x43c0, 0x4458,
5254 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5255 0x4fc0, 0x5010, 0x53c0, 0x5444,
5256 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5257 0x5fc0, 0x6000, 0x6400, 0x6428,
5258 0x6800, 0x6848, 0x684c, 0x6860,
5259 0x6888, 0x6910, 0x8000 };
5263 memset(p, 0, BNX2_REGDUMP_LEN);
5265 if (!netif_running(bp->dev))
5269 offset = reg_boundaries[0];
5271 while (offset < BNX2_REGDUMP_LEN) {
5272 *p++ = REG_RD(bp, offset);
5274 if (offset == reg_boundaries[i + 1]) {
5275 offset = reg_boundaries[i + 2];
5276 p = (u32 *) (orig_p + offset);
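/*
 * The reg_boundaries[] values above form [start, end) pairs of readable
 * register windows.  The dump reads each window and leaves the holes
 * between windows zero-filled (the memset above), so e.g. bytes
 * 0x0000-0x0097 are read, 0x0098-0x03ff stay zero, and reading resumes
 * at 0x0400.
 */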
5283 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5285 struct bnx2 *bp = netdev_priv(dev);
5287 if (bp->flags & NO_WOL_FLAG) {
5292 wol->supported = WAKE_MAGIC;
5294 wol->wolopts = WAKE_MAGIC;
5298 memset(&wol->sopass, 0, sizeof(wol->sopass));
5302 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5304 struct bnx2 *bp = netdev_priv(dev);
5306 if (wol->wolopts & ~WAKE_MAGIC)
5309 if (wol->wolopts & WAKE_MAGIC) {
5310 if (bp->flags & NO_WOL_FLAG)
5322 bnx2_nway_reset(struct net_device *dev)
5324 struct bnx2 *bp = netdev_priv(dev);
5327 if (!(bp->autoneg & AUTONEG_SPEED)) {
5331 spin_lock_bh(&bp->phy_lock);
5333 /* Force a link down that is visible to the link partner */
5334 if (bp->phy_flags & PHY_SERDES_FLAG) {
5335 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5336 spin_unlock_bh(&bp->phy_lock);
5340 spin_lock_bh(&bp->phy_lock);
5342 bp->current_interval = SERDES_AN_TIMEOUT;
5343 bp->serdes_an_pending = 1;
5344 mod_timer(&bp->timer, jiffies + bp->current_interval);
5347 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5348 bmcr &= ~BMCR_LOOPBACK;
5349 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5351 spin_unlock_bh(&bp->phy_lock);
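/*
 * Usage note (assumes standard ethtool semantics): "ethtool -r <dev>"
 * ends up here.  For SerDes PHYs the BMCR_LOOPBACK write above forces
 * the link down so the partner also renegotiates, and the driver timer
 * completes the autoneg restart.
 */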
5357 bnx2_get_eeprom_len(struct net_device *dev)
5359 struct bnx2 *bp = netdev_priv(dev);
5361 if (bp->flash_info == NULL)
5364 return (int) bp->flash_size;
5368 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5371 struct bnx2 *bp = netdev_priv(dev);
5374 /* parameters already validated in ethtool_get_eeprom */
5376 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5382 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5385 struct bnx2 *bp = netdev_priv(dev);
5388 /* parameters already validated in ethtool_set_eeprom */
5390 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5396 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5398 struct bnx2 *bp = netdev_priv(dev);
5400 memset(coal, 0, sizeof(struct ethtool_coalesce));
5402 coal->rx_coalesce_usecs = bp->rx_ticks;
5403 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5404 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5405 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5407 coal->tx_coalesce_usecs = bp->tx_ticks;
5408 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5409 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5410 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5412 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5418 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5420 struct bnx2 *bp = netdev_priv(dev);
5422 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5423 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5425 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5426 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5428 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5429 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5431 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5432 if (bp->rx_quick_cons_trip_int > 0xff)
5433 bp->rx_quick_cons_trip_int = 0xff;
5435 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5436 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5438 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5439 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5441 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5442 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5444 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5445 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = 0xff;
5448 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5449 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5450 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5451 bp->stats_ticks = USEC_PER_SEC;
5453 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5454 bp->stats_ticks &= 0xffff00;
5456 if (netif_running(bp->dev)) {
5457 bnx2_netif_stop(bp);
5459 bnx2_netif_start(bp);
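/*
 * Usage sketch (assumes the standard ethtool CLI; "eth0" is a
 * placeholder):
 *	ethtool -C eth0 rx-usecs 18 rx-frames 6 tx-usecs 80 tx-frames 20
 * maps onto rx_ticks / rx_quick_cons_trip / tx_ticks /
 * tx_quick_cons_trip, clamped above to the 10-bit (0x3ff) and 8-bit
 * (0xff) hardware limits.
 */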
5466 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5468 struct bnx2 *bp = netdev_priv(dev);
5470 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5471 ering->rx_mini_max_pending = 0;
5472 ering->rx_jumbo_max_pending = 0;
5474 ering->rx_pending = bp->rx_ring_size;
5475 ering->rx_mini_pending = 0;
5476 ering->rx_jumbo_pending = 0;
5478 ering->tx_max_pending = MAX_TX_DESC_CNT;
5479 ering->tx_pending = bp->tx_ring_size;
5483 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5485 struct bnx2 *bp = netdev_priv(dev);
5487 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5488 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5489 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5493 if (netif_running(bp->dev)) {
5494 bnx2_netif_stop(bp);
5495 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5500 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5501 bp->tx_ring_size = ering->tx_pending;
5503 if (netif_running(bp->dev)) {
5506 rc = bnx2_alloc_mem(bp);
5510 bnx2_netif_start(bp);
5517 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5519 struct bnx2 *bp = netdev_priv(dev);
5521 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5522 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5523 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5527 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5529 struct bnx2 *bp = netdev_priv(dev);
5531 bp->req_flow_ctrl = 0;
5532 if (epause->rx_pause)
5533 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5534 if (epause->tx_pause)
5535 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5537 if (epause->autoneg) {
5538 bp->autoneg |= AUTONEG_FLOW_CTRL;
5541 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5544 spin_lock_bh(&bp->phy_lock);
5548 spin_unlock_bh(&bp->phy_lock);
5554 bnx2_get_rx_csum(struct net_device *dev)
5556 struct bnx2 *bp = netdev_priv(dev);
5562 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5564 struct bnx2 *bp = netdev_priv(dev);
5571 bnx2_set_tso(struct net_device *dev, u32 data)
5573 struct bnx2 *bp = netdev_priv(dev);
5576 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5577 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5578 dev->features |= NETIF_F_TSO6;
5580 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5585 #define BNX2_NUM_STATS 46
5588 char string[ETH_GSTRING_LEN];
5589 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5591 { "rx_error_bytes" },
5593 { "tx_error_bytes" },
5594 { "rx_ucast_packets" },
5595 { "rx_mcast_packets" },
5596 { "rx_bcast_packets" },
5597 { "tx_ucast_packets" },
5598 { "tx_mcast_packets" },
5599 { "tx_bcast_packets" },
5600 { "tx_mac_errors" },
5601 { "tx_carrier_errors" },
5602 { "rx_crc_errors" },
5603 { "rx_align_errors" },
5604 { "tx_single_collisions" },
5605 { "tx_multi_collisions" },
5607 { "tx_excess_collisions" },
5608 { "tx_late_collisions" },
5609 { "tx_total_collisions" },
5612 { "rx_undersize_packets" },
5613 { "rx_oversize_packets" },
5614 { "rx_64_byte_packets" },
5615 { "rx_65_to_127_byte_packets" },
5616 { "rx_128_to_255_byte_packets" },
5617 { "rx_256_to_511_byte_packets" },
5618 { "rx_512_to_1023_byte_packets" },
5619 { "rx_1024_to_1522_byte_packets" },
5620 { "rx_1523_to_9022_byte_packets" },
5621 { "tx_64_byte_packets" },
5622 { "tx_65_to_127_byte_packets" },
5623 { "tx_128_to_255_byte_packets" },
5624 { "tx_256_to_511_byte_packets" },
5625 { "tx_512_to_1023_byte_packets" },
5626 { "tx_1024_to_1522_byte_packets" },
5627 { "tx_1523_to_9022_byte_packets" },
5628 { "rx_xon_frames" },
5629 { "rx_xoff_frames" },
5630 { "tx_xon_frames" },
5631 { "tx_xoff_frames" },
5632 { "rx_mac_ctrl_frames" },
5633 { "rx_filtered_packets" },
5635 { "rx_fw_discards" },
5638 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5640 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5641 STATS_OFFSET32(stat_IfHCInOctets_hi),
5642 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5643 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5644 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5645 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5646 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5647 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5648 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5649 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5650 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5651 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5652 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5653 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5654 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5655 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5656 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5657 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5658 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5659 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5660 STATS_OFFSET32(stat_EtherStatsCollisions),
5661 STATS_OFFSET32(stat_EtherStatsFragments),
5662 STATS_OFFSET32(stat_EtherStatsJabbers),
5663 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5664 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5665 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5666 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5667 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5668 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5669 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5670 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5671 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5672 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5673 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5674 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5675 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5676 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5677 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5678 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5679 STATS_OFFSET32(stat_XonPauseFramesReceived),
5680 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5681 STATS_OFFSET32(stat_OutXonSent),
5682 STATS_OFFSET32(stat_OutXoffSent),
5683 STATS_OFFSET32(stat_MacControlFramesReceived),
5684 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5685 STATS_OFFSET32(stat_IfInMBUFDiscards),
5686 STATS_OFFSET32(stat_FwRxDrop),
5689 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5690 * skipped because of errata. */
5692 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5693 8,0,8,8,8,8,8,8,8,8,
5694 4,0,4,4,4,4,4,4,4,4,
5695 4,4,4,4,4,4,4,4,4,4,
5696 4,4,4,4,4,4,4,4,4,4,
5700 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5701 8,0,8,8,8,8,8,8,8,8,
5702 4,4,4,4,4,4,4,4,4,4,
5703 4,4,4,4,4,4,4,4,4,4,
5704 4,4,4,4,4,4,4,4,4,4,
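/*
 * In the per-chip length arrays above, 8 marks a 64-bit hi/lo counter
 * pair, 4 a single 32-bit counter, and 0 an entry that is skipped and
 * reported as zero because of the errata noted earlier
 * (stat_IfHCInBadOctets on both chips, plus
 * stat_Dot3StatsCarrierSenseErrors on the 5706).
 */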
5708 #define BNX2_NUM_TESTS 6
5711 char string[ETH_GSTRING_LEN];
5712 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5713 { "register_test (offline)" },
5714 { "memory_test (offline)" },
5715 { "loopback_test (offline)" },
5716 { "nvram_test (online)" },
5717 { "interrupt_test (online)" },
5718 { "link_test (online)" },
5722 bnx2_self_test_count(struct net_device *dev)
5724 return BNX2_NUM_TESTS;
5728 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5730 struct bnx2 *bp = netdev_priv(dev);
5732 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5733 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5736 bnx2_netif_stop(bp);
5737 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5740 if (bnx2_test_registers(bp) != 0) {
5742 etest->flags |= ETH_TEST_FL_FAILED;
5744 if (bnx2_test_memory(bp) != 0) {
5746 etest->flags |= ETH_TEST_FL_FAILED;
5748 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5749 etest->flags |= ETH_TEST_FL_FAILED;
5751 if (!netif_running(bp->dev)) {
5752 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5756 bnx2_netif_start(bp);
5759 /* wait for link up */
5760 for (i = 0; i < 7; i++) {
5763 msleep_interruptible(1000);
5767 if (bnx2_test_nvram(bp) != 0) {
5769 etest->flags |= ETH_TEST_FL_FAILED;
5771 if (bnx2_test_intr(bp) != 0) {
5773 etest->flags |= ETH_TEST_FL_FAILED;
5776 if (bnx2_test_link(bp) != 0) {
5778 etest->flags |= ETH_TEST_FL_FAILED;
5784 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5786 switch (stringset) {
5788 memcpy(buf, bnx2_stats_str_arr,
5789 sizeof(bnx2_stats_str_arr));
5792 memcpy(buf, bnx2_tests_str_arr,
5793 sizeof(bnx2_tests_str_arr));
5799 bnx2_get_stats_count(struct net_device *dev)
5801 return BNX2_NUM_STATS;
5805 bnx2_get_ethtool_stats(struct net_device *dev,
5806 struct ethtool_stats *stats, u64 *buf)
5808 struct bnx2 *bp = netdev_priv(dev);
5810 u32 *hw_stats = (u32 *) bp->stats_blk;
5811 u8 *stats_len_arr = NULL;
5813 if (hw_stats == NULL) {
5814 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5818 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5819 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5820 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5821 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5822 stats_len_arr = bnx2_5706_stats_len_arr;
5824 stats_len_arr = bnx2_5708_stats_len_arr;
5826 for (i = 0; i < BNX2_NUM_STATS; i++) {
5827 if (stats_len_arr[i] == 0) {
5828 /* skip this counter */
5832 if (stats_len_arr[i] == 4) {
5833 /* 4-byte counter */
5835 *(hw_stats + bnx2_stats_offset_arr[i]);
5838 /* 8-byte counter */
5839 buf[i] = (((u64) *(hw_stats +
5840 bnx2_stats_offset_arr[i])) << 32) +
5841 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
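/*
 * Illustration (not driver code): for an 8-byte counter whose hi/lo
 * words read 0x00000001 / 0x00000002, the assembly above yields
 * buf[i] = ((u64) 0x1 << 32) + 0x2 = 0x100000002.  The _hi word comes
 * first in the statistics block, so offset + 1 is the low word.
 */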
5846 bnx2_phys_id(struct net_device *dev, u32 data)
5848 struct bnx2 *bp = netdev_priv(dev);
5855 save = REG_RD(bp, BNX2_MISC_CFG);
5856 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5858 for (i = 0; i < (data * 2); i++) {
5860 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5863 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5864 BNX2_EMAC_LED_1000MB_OVERRIDE |
5865 BNX2_EMAC_LED_100MB_OVERRIDE |
5866 BNX2_EMAC_LED_10MB_OVERRIDE |
5867 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5868 BNX2_EMAC_LED_TRAFFIC);
5870 msleep_interruptible(500);
5871 if (signal_pending(current))
5874 REG_WR(bp, BNX2_EMAC_LED, 0);
5875 REG_WR(bp, BNX2_MISC_CFG, save);
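/*
 * Usage note (assumes standard ethtool semantics): "ethtool -p <dev> N"
 * reaches this handler; the loop above toggles the LED state every
 * 500 ms for N * 2 iterations, i.e. roughly N seconds of blinking,
 * before restoring the saved LED mode.
 */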
5880 bnx2_set_tx_csum(struct net_device *dev, u32 data)
5882 struct bnx2 *bp = netdev_priv(dev);
5884 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5885 return (ethtool_op_set_tx_hw_csum(dev, data));
5887 return (ethtool_op_set_tx_csum(dev, data));
5890 static const struct ethtool_ops bnx2_ethtool_ops = {
5891 .get_settings = bnx2_get_settings,
5892 .set_settings = bnx2_set_settings,
5893 .get_drvinfo = bnx2_get_drvinfo,
5894 .get_regs_len = bnx2_get_regs_len,
5895 .get_regs = bnx2_get_regs,
5896 .get_wol = bnx2_get_wol,
5897 .set_wol = bnx2_set_wol,
5898 .nway_reset = bnx2_nway_reset,
5899 .get_link = ethtool_op_get_link,
5900 .get_eeprom_len = bnx2_get_eeprom_len,
5901 .get_eeprom = bnx2_get_eeprom,
5902 .set_eeprom = bnx2_set_eeprom,
5903 .get_coalesce = bnx2_get_coalesce,
5904 .set_coalesce = bnx2_set_coalesce,
5905 .get_ringparam = bnx2_get_ringparam,
5906 .set_ringparam = bnx2_set_ringparam,
5907 .get_pauseparam = bnx2_get_pauseparam,
5908 .set_pauseparam = bnx2_set_pauseparam,
5909 .get_rx_csum = bnx2_get_rx_csum,
5910 .set_rx_csum = bnx2_set_rx_csum,
5911 .get_tx_csum = ethtool_op_get_tx_csum,
5912 .set_tx_csum = bnx2_set_tx_csum,
5913 .get_sg = ethtool_op_get_sg,
5914 .set_sg = ethtool_op_set_sg,
5915 .get_tso = ethtool_op_get_tso,
5916 .set_tso = bnx2_set_tso,
5917 .self_test_count = bnx2_self_test_count,
5918 .self_test = bnx2_self_test,
5919 .get_strings = bnx2_get_strings,
5920 .phys_id = bnx2_phys_id,
5921 .get_stats_count = bnx2_get_stats_count,
5922 .get_ethtool_stats = bnx2_get_ethtool_stats,
5923 .get_perm_addr = ethtool_op_get_perm_addr,
5926 /* Called with rtnl_lock */
5928 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5930 struct mii_ioctl_data *data = if_mii(ifr);
5931 struct bnx2 *bp = netdev_priv(dev);
5936 data->phy_id = bp->phy_addr;
5942 if (!netif_running(dev))
5945 spin_lock_bh(&bp->phy_lock);
5946 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5947 spin_unlock_bh(&bp->phy_lock);
5949 data->val_out = mii_regval;
5955 if (!capable(CAP_NET_ADMIN))
5958 if (!netif_running(dev))
5961 spin_lock_bh(&bp->phy_lock);
5962 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5963 spin_unlock_bh(&bp->phy_lock);
5974 /* Called with rtnl_lock */
5976 bnx2_change_mac_addr(struct net_device *dev, void *p)
5978 struct sockaddr *addr = p;
5979 struct bnx2 *bp = netdev_priv(dev);
5981 if (!is_valid_ether_addr(addr->sa_data))
5984 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5985 if (netif_running(dev))
5986 bnx2_set_mac_addr(bp);
5991 /* Called with rtnl_lock */
5993 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5995 struct bnx2 *bp = netdev_priv(dev);
5997 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5998 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6002 if (netif_running(dev)) {
6003 bnx2_netif_stop(bp);
6007 bnx2_netif_start(bp);
6012 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6014 poll_bnx2(struct net_device *dev)
6016 struct bnx2 *bp = netdev_priv(dev);
6018 disable_irq(bp->pdev->irq);
6019 bnx2_interrupt(bp->pdev->irq, dev);
6020 enable_irq(bp->pdev->irq);
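/*
 * Note: poll_bnx2() backs netpoll users such as netconsole.  It masks
 * the device interrupt and invokes the handler directly so packets can
 * still be processed while normal interrupt delivery is unavailable.
 */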
6024 static void __devinit
6025 bnx2_get_5709_media(struct bnx2 *bp)
6027 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6028 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6031 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6033 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6034 bp->phy_flags |= PHY_SERDES_FLAG;
6038 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6039 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6041 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6043 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6048 bp->phy_flags |= PHY_SERDES_FLAG;
6056 bp->phy_flags |= PHY_SERDES_FLAG;
6062 static void __devinit
6063 bnx2_get_pci_speed(struct bnx2 *bp)
6067 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6068 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6071 bp->flags |= PCIX_FLAG;
6073 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6075 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6077 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6078 bp->bus_speed_mhz = 133;
6081 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6082 bp->bus_speed_mhz = 100;
6085 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6086 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6087 bp->bus_speed_mhz = 66;
6090 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6091 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6092 bp->bus_speed_mhz = 50;
6095 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6096 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6097 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6098 bp->bus_speed_mhz = 33;
6103 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6104 bp->bus_speed_mhz = 66;
6106 bp->bus_speed_mhz = 33;
6109 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6110 bp->flags |= PCI_32BIT_FLAG;
6114 static int __devinit
6115 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6118 unsigned long mem_len;
6121 u64 dma_mask, persist_dma_mask;
6123 SET_MODULE_OWNER(dev);
6124 SET_NETDEV_DEV(dev, &pdev->dev);
6125 bp = netdev_priv(dev);
6130 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6131 rc = pci_enable_device(pdev);
6133 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6137 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6139 "Cannot find PCI device base address, aborting.\n");
6141 goto err_out_disable;
6144 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6146 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6147 goto err_out_disable;
6150 pci_set_master(pdev);
6152 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6153 if (bp->pm_cap == 0) {
6155 "Cannot find power management capability, aborting.\n");
6157 goto err_out_release;
6163 spin_lock_init(&bp->phy_lock);
6164 spin_lock_init(&bp->indirect_lock);
6165 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6167 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6168 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6169 dev->mem_end = dev->mem_start + mem_len;
6170 dev->irq = pdev->irq;
6172 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6175 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6177 goto err_out_release;
6180 /* Configure byte swap and enable write to the reg_window registers.
6181 * Rely on the CPU to do target byte swapping on big endian systems;
6182 * the chip's target access swapping will not swap all accesses. */
6184 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6185 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6186 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6188 bnx2_set_power_state(bp, PCI_D0);
6190 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6192 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6193 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6195 "Cannot find PCIE capability, aborting.\n");
6199 bp->flags |= PCIE_FLAG;
6201 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6202 if (bp->pcix_cap == 0) {
6204 "Cannot find PCIX capability, aborting.\n");
6210 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6211 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6212 bp->flags |= MSI_CAP_FLAG;
6215 /* 5708 cannot support DMA addresses > 40-bit. */
6216 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6217 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6219 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6221 /* Configure DMA attributes. */
6222 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6223 dev->features |= NETIF_F_HIGHDMA;
6224 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6227 "pci_set_consistent_dma_mask failed, aborting.\n");
6230 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6231 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6235 if (!(bp->flags & PCIE_FLAG))
6236 bnx2_get_pci_speed(bp);
6238 /* 5706A0 may falsely detect SERR and PERR. */
6239 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6240 reg = REG_RD(bp, PCI_COMMAND);
6241 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6242 REG_WR(bp, PCI_COMMAND, reg);
6244 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6245 !(bp->flags & PCIX_FLAG)) {
6248 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6252 bnx2_init_nvram(bp);
6254 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6256 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6257 BNX2_SHM_HDR_SIGNATURE_SIG) {
6258 u32 off = PCI_FUNC(pdev->devfn) << 2;
6260 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6262 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6264 /* Get the permanent MAC address. First we need to make sure the
6265 * firmware is actually running. */
6267 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6269 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6270 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6271 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6276 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6278 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6279 bp->mac_addr[0] = (u8) (reg >> 8);
6280 bp->mac_addr[1] = (u8) reg;
6282 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6283 bp->mac_addr[2] = (u8) (reg >> 24);
6284 bp->mac_addr[3] = (u8) (reg >> 16);
6285 bp->mac_addr[4] = (u8) (reg >> 8);
6286 bp->mac_addr[5] = (u8) reg;
6288 bp->tx_ring_size = MAX_TX_DESC_CNT;
6289 bnx2_set_rx_ring_size(bp, 255);
6293 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6295 bp->tx_quick_cons_trip_int = 20;
6296 bp->tx_quick_cons_trip = 20;
6297 bp->tx_ticks_int = 80;
6300 bp->rx_quick_cons_trip_int = 6;
6301 bp->rx_quick_cons_trip = 6;
6302 bp->rx_ticks_int = 18;
6305 bp->stats_ticks = 1000000 & 0xffff00;
6307 bp->timer_interval = HZ;
6308 bp->current_interval = HZ;
6312 /* Disable WOL support if we are running on a SERDES chip. */
6313 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6314 bnx2_get_5709_media(bp);
6315 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6316 bp->phy_flags |= PHY_SERDES_FLAG;
6318 if (bp->phy_flags & PHY_SERDES_FLAG) {
6319 bp->flags |= NO_WOL_FLAG;
6320 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6322 reg = REG_RD_IND(bp, bp->shmem_base +
6323 BNX2_SHARED_HW_CFG_CONFIG);
6324 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6325 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6327 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6328 CHIP_NUM(bp) == CHIP_NUM_5708)
6329 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6330 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6331 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6333 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6334 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6335 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6336 bp->flags |= NO_WOL_FLAG;
6338 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6339 bp->tx_quick_cons_trip_int =
6340 bp->tx_quick_cons_trip;
6341 bp->tx_ticks_int = bp->tx_ticks;
6342 bp->rx_quick_cons_trip_int =
6343 bp->rx_quick_cons_trip;
6344 bp->rx_ticks_int = bp->rx_ticks;
6345 bp->comp_prod_trip_int = bp->comp_prod_trip;
6346 bp->com_ticks_int = bp->com_ticks;
6347 bp->cmd_ticks_int = bp->cmd_ticks;
6350 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6352 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6353 * with byte enables disabled on the unused 32-bit word. This is legal
6354 * but causes problems on the AMD 8132 which will eventually stop
6355 * responding after a while.
6357 * AMD believes this incompatibility is unique to the 5706, and
6358 * prefers to locally disable MSI rather than globally disabling it. */
6360 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6361 struct pci_dev *amd_8132 = NULL;
6363 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6364 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6368 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6369 if (rev >= 0x10 && rev <= 0x13) {
6371 pci_dev_put(amd_8132);
6377 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6378 bp->req_line_speed = 0;
6379 if (bp->phy_flags & PHY_SERDES_FLAG) {
6380 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6382 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6383 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6384 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6386 bp->req_line_speed = bp->line_speed = SPEED_1000;
6387 bp->req_duplex = DUPLEX_FULL;
6391 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6394 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6396 init_timer(&bp->timer);
6397 bp->timer.expires = RUN_AT(bp->timer_interval);
6398 bp->timer.data = (unsigned long) bp;
6399 bp->timer.function = bnx2_timer;
6405 iounmap(bp->regview);
6410 pci_release_regions(pdev);
6413 pci_disable_device(pdev);
6414 pci_set_drvdata(pdev, NULL);
6420 static char * __devinit
6421 bnx2_bus_string(struct bnx2 *bp, char *str)
6425 if (bp->flags & PCIE_FLAG) {
6426 s += sprintf(s, "PCI Express");
6428 s += sprintf(s, "PCI");
6429 if (bp->flags & PCIX_FLAG)
6430 s += sprintf(s, "-X");
6431 if (bp->flags & PCI_32BIT_FLAG)
6432 s += sprintf(s, " 32-bit");
6434 s += sprintf(s, " 64-bit");
6435 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
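/*
 * Example outputs (illustration only): "PCI Express", or for the
 * parallel bus "PCI-X 64-bit 133MHz" / "PCI 32-bit 66MHz", depending on
 * the flags and bus_speed_mhz detected in bnx2_get_pci_speed().
 */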
6440 static int __devinit
6441 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6443 static int version_printed = 0;
6444 struct net_device *dev = NULL;
6449 if (version_printed++ == 0)
6450 printk(KERN_INFO "%s", version);
6452 /* dev zeroed in alloc_etherdev */
6453 dev = alloc_etherdev(sizeof(*bp));
6458 rc = bnx2_init_board(pdev, dev);
6464 dev->open = bnx2_open;
6465 dev->hard_start_xmit = bnx2_start_xmit;
6466 dev->stop = bnx2_close;
6467 dev->get_stats = bnx2_get_stats;
6468 dev->set_multicast_list = bnx2_set_rx_mode;
6469 dev->do_ioctl = bnx2_ioctl;
6470 dev->set_mac_address = bnx2_change_mac_addr;
6471 dev->change_mtu = bnx2_change_mtu;
6472 dev->tx_timeout = bnx2_tx_timeout;
6473 dev->watchdog_timeo = TX_TIMEOUT;
6475 dev->vlan_rx_register = bnx2_vlan_rx_register;
6477 dev->poll = bnx2_poll;
6478 dev->ethtool_ops = &bnx2_ethtool_ops;
6481 bp = netdev_priv(dev);
6483 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6484 dev->poll_controller = poll_bnx2;
6487 pci_set_drvdata(pdev, dev);
6489 memcpy(dev->dev_addr, bp->mac_addr, 6);
6490 memcpy(dev->perm_addr, bp->mac_addr, 6);
6491 bp->name = board_info[ent->driver_data].name;
6493 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6494 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6496 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6498 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6500 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6501 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6502 dev->features |= NETIF_F_TSO6;
6504 if ((rc = register_netdev(dev))) {
6505 dev_err(&pdev->dev, "Cannot register net device\n");
6507 iounmap(bp->regview);
6508 pci_release_regions(pdev);
6509 pci_disable_device(pdev);
6510 pci_set_drvdata(pdev, NULL);
6515 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6519 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6520 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6521 bnx2_bus_string(bp, str),
6525 printk("node addr ");
6526 for (i = 0; i < 6; i++)
6527 printk("%2.2x", dev->dev_addr[i]);
6533 static void __devexit
6534 bnx2_remove_one(struct pci_dev *pdev)
6536 struct net_device *dev = pci_get_drvdata(pdev);
6537 struct bnx2 *bp = netdev_priv(dev);
6539 flush_scheduled_work();
6541 unregister_netdev(dev);
6544 iounmap(bp->regview);
6547 pci_release_regions(pdev);
6548 pci_disable_device(pdev);
6549 pci_set_drvdata(pdev, NULL);
6553 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6555 struct net_device *dev = pci_get_drvdata(pdev);
6556 struct bnx2 *bp = netdev_priv(dev);
6559 if (!netif_running(dev))
6562 flush_scheduled_work();
6563 bnx2_netif_stop(bp);
6564 netif_device_detach(dev);
6565 del_timer_sync(&bp->timer);
6566 if (bp->flags & NO_WOL_FLAG)
6567 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6569 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6571 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6572 bnx2_reset_chip(bp, reset_code);
6574 pci_save_state(pdev);
6575 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6580 bnx2_resume(struct pci_dev *pdev)
6582 struct net_device *dev = pci_get_drvdata(pdev);
6583 struct bnx2 *bp = netdev_priv(dev);
6585 if (!netif_running(dev))
6588 pci_restore_state(pdev);
6589 bnx2_set_power_state(bp, PCI_D0);
6590 netif_device_attach(dev);
6592 bnx2_netif_start(bp);
6596 static struct pci_driver bnx2_pci_driver = {
6597 .name = DRV_MODULE_NAME,
6598 .id_table = bnx2_pci_tbl,
6599 .probe = bnx2_init_one,
6600 .remove = __devexit_p(bnx2_remove_one),
6601 .suspend = bnx2_suspend,
6602 .resume = bnx2_resume,
6605 static int __init bnx2_init(void)
6607 return pci_register_driver(&bnx2_pci_driver);
6610 static void __exit bnx2_cleanup(void)
6612 pci_unregister_driver(&bnx2_pci_driver);
6615 module_init(bnx2_init);
6616 module_exit(bnx2_cleanup);