/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/list.h>

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0.1"
#define DRV_MODULE_RELDATE	"May 6, 2009"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-4.6.16.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-4.6.16.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-4.6.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-4.6.15.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
105 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
106 { "HP NC370T Multifunction Gigabit Server Adapter" },
107 { "HP NC370i Multifunction Gigabit Server Adapter" },
108 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
109 { "HP NC370F Multifunction Gigabit Server Adapter" },
110 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
111 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
112 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
113 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
114 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
115 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
179 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
184 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
230 "Buffered flash (256kB)"},
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
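
/* Note on the ring accounting below: tx_prod and tx_cons are free-running
 * 16-bit indices, so their difference stays correct across wraparound.
 * One descriptor per ring page is reserved for the next-page pointer,
 * which is why 256 hardware indices only describe 255 usable entries.
 */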
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();
	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
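
/* Indirect register access: the device exposes a sliding window through
 * PCI config space.  Writing the target offset to the window-address
 * register and then reading or writing the window register accesses the
 * internal register; indirect_lock serializes the two-step sequence.
 */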
static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
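
/* Context memory holds per-connection (CID) state.  On the 5709 the
 * context is written through a request/poll interface, while older chips
 * take a simple address/data register pair.  Callers pass a CID address
 * from GET_CID_ADDR() plus a field offset.
 */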
static void bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
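
/* MII/MDIO access to the PHY.  If the chip is auto-polling the PHY, the
 * polling must be paused around a manual COMM transaction and restored
 * afterwards.  A transaction is started by setting START_BUSY and is done
 * when the hardware clears that bit; the loops below poll for completion
 * with a bounded retry count.
 */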
static int bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	} else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	return ret;
}
static int bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY))
			break;
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	return ret;
}
static void bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
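
/* The INT_ACK_CMD register both acknowledges and (un)masks a vector.
 * In bnx2_enable_int() below, the first write (with MASK_INT and a valid
 * status index) acks while keeping the vector masked; the second write
 * without MASK_INT re-enables it and tells the chip which status-block
 * index the driver has consumed.
 */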
static void bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}
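
/* intr_sem keeps the device quiet while the driver reconfigures it:
 * bnx2_netif_stop() bumps the semaphore and waits out in-flight handlers,
 * and bnx2_netif_start() only re-arms NAPI and the TX queues once the
 * count drops back to zero.
 */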
static void bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
		}
	}
}
static void bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
static void bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i, j;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		if (rxr->rx_buf_ring)
			vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}

		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
static int bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
static int bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i, j;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static void bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
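
/* The status block(s) and the statistics block share one DMA allocation:
 * [status block | per-vector MSI-X blocks][statistics block].
 * status_stats_size is the cache-aligned status area plus the statistics
 * block, and stats_blk points just past the status area.
 */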
static int bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}
		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (!(bmsr & BMSR_ANEGCOMPLETE) ||
		    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
			fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
		else
			fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
	} else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}
static void bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl & FLOW_CTRL_RX) {
			printk(", receive ");
			if (bp->flow_ctrl & FLOW_CTRL_TX)
				printk("& transmit ");
		} else
			printk(", transmit ");
		printk("flow control ON");
	} else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
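
/* Flow control resolution.  When both speed and flow control are
 * auto-negotiated, the TX/RX pause outcome follows the standard
 * pause-resolution table (802.3 Annex 28B) applied to the local and
 * link-partner advertisements; 1000Base-X advertisement bits are first
 * mapped onto their copper ADVERTISE_PAUSE_* equivalents.
 */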
static void bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
static void bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
}
static void bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
}
static int bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
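
/* Note: MII_STAT1000 reports the partner's 1000Base-T abilities two bits
 * to the left of the corresponding MII_CTRL1000 advertisement bits, so
 * "local_adv & (remote_adv >> 2)" below lines the two masks up before
 * checking for a common 1000 Mbps mode.
 */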
static int bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
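
/* The 5709 RX context carries flow-control watermarks: lo_water is the
 * free-descriptor level that triggers pause, hi_water the level that
 * releases it.  Both are scaled down to the small fields the context
 * expects, and a zero lo_water disables the mechanism.
 */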
static void bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
static void bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		bnx2_init_rx_context(bp, cid);
	}
}
static void bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	} else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}
	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
static void bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static int bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
static int bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32 bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
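
/* Link events from the management firmware arrive through shared memory
 * rather than the PHY.  The speed switch below relies on intentional
 * fall-through: each _HALF case sets half duplex and then falls into the
 * matching _FULL case, which sets the line speed.
 */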
static void bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static void bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		/* Normally, the new speed is set up after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
static int bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
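
/* Driver/firmware handshake: a sequence number is folded into the
 * message written to the DRV_MB mailbox, and the firmware echoes it back
 * in FW_MB once it has processed the command.  On timeout the driver
 * reports FW_TIMEOUT back to the firmware so both sides agree that the
 * command failed.
 */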
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);
		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
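
/* The 5709 keeps context in host memory.  After the MEM_INIT command
 * completes, each host page is handed to the chip by programming the
 * page-table data registers with its DMA address (low half tagged VALID,
 * high half separately) and issuing a WRITE_REQ per entry.
 */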
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
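
/* Hardware workaround: some internal RX buffer (mbuf) memory blocks are
 * known bad and are flagged by bit 9 of the allocated address.  The
 * firmware interface only allows allocating and freeing, so the driver
 * drains the pool, remembers the good buffers, and frees only those
 * back, leaving the bad blocks permanently allocated.
 */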
2420 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2426 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2427 if (good_mbuf == NULL) {
2428 printk(KERN_ERR PFX "Failed to allocate memory in "
2429 "bnx2_alloc_bad_rbuf\n");
2433 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2434 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2438 /* Allocate a bunch of mbufs and save the good ones in an array. */
2439 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2440 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2441 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2442 BNX2_RBUF_COMMAND_ALLOC_REQ);
2444 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2446 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2448 /* The addresses with Bit 9 set are bad memory blocks. */
2449 if (!(val & (1 << 9))) {
2450 good_mbuf[good_mbuf_cnt] = (u16) val;
2454 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2457 /* Free the good ones back to the mbuf pool thus discarding
2458 * all the bad ones. */
2459 while (good_mbuf_cnt) {
2462 val = good_mbuf[good_mbuf_cnt];
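/* The free command wants the mbuf index in both halves of the
 * word, plus a valid bit in bit 0.
 */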
2463 val = (val << 9) | val | 1;
2465 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
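/* Program a perfect-match MAC address filter: MATCH0 takes the two
 * high-order bytes, MATCH1 the remaining four.
 */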
2472 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2476 val = (mac_addr[0] << 8) | mac_addr[1];
2478 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2480 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2481 (mac_addr[4] << 8) | mac_addr[5];
2483 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2487 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2490 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2491 struct rx_bd *rxbd =
2492 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2493 struct page *page = alloc_page(GFP_ATOMIC);
2497 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2498 PCI_DMA_FROMDEVICE);
2499 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2505 pci_unmap_addr_set(rx_pg, mapping, mapping);
2506 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2507 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2512 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2514 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2515 struct page *page = rx_pg->page;
2520 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2521 PCI_DMA_FROMDEVICE);
2528 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2530 struct sk_buff *skb;
2531 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2533 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2534 unsigned long align;
2536 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2541 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2542 skb_reserve(skb, BNX2_RX_ALIGN - align);
2544 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2545 PCI_DMA_FROMDEVICE);
2546 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2552 pci_unmap_addr_set(rx_buf, mapping, mapping);
2554 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2555 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2557 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2563 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2565 struct status_block *sblk = bnapi->status_blk.msi;
2566 u32 new_link_state, old_link_state;
2569 new_link_state = sblk->status_attn_bits & event;
2570 old_link_state = sblk->status_attn_bits_ack & event;
2571 if (new_link_state != old_link_state) {
2573 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2575 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2583 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2585 spin_lock(&bp->phy_lock);
2587 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2589 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2590 bnx2_set_remote_link(bp);
2592 spin_unlock(&bp->phy_lock);
2597 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2601 /* Tell compiler that status block fields can change. */
2603 cons = *bnapi->hw_tx_cons_ptr;
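/* The last BD of each ring page is a next-page pointer rather than
 * a real descriptor, so skip the consumer index past it.
 */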
2605 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2611 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2613 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2614 u16 hw_cons, sw_cons, sw_ring_cons;
2615 int tx_pkt = 0, index;
2616 struct netdev_queue *txq;
2618 index = (bnapi - bp->bnx2_napi);
2619 txq = netdev_get_tx_queue(bp->dev, index);
2621 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2622 sw_cons = txr->tx_cons;
2624 while (sw_cons != hw_cons) {
2625 struct sw_tx_bd *tx_buf;
2626 struct sk_buff *skb;
2629 sw_ring_cons = TX_RING_IDX(sw_cons);
2631 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2634 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2635 prefetch(&skb->end);
2637 /* partial BD completions possible with TSO packets */
2638 if (tx_buf->is_gso) {
2639 u16 last_idx, last_ring_idx;
2641 last_idx = sw_cons + tx_buf->nr_frags + 1;
2642 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2643 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2646 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2651 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
2654 last = tx_buf->nr_frags;
2656 for (i = 0; i < last; i++) {
2657 sw_cons = NEXT_TX_BD(sw_cons);
2660 sw_cons = NEXT_TX_BD(sw_cons);
2664 if (tx_pkt == budget)
2667 if (hw_cons == sw_cons)
2668 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2671 txr->hw_tx_cons = hw_cons;
2672 txr->tx_cons = sw_cons;
2674 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2675 * before checking for netif_tx_queue_stopped(). Without the
2676 * memory barrier, there is a small possibility that bnx2_start_xmit()
2677 * will miss it and cause the queue to be stopped forever.
2681 if (unlikely(netif_tx_queue_stopped(txq)) &&
2682 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
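/* Re-check the queue state under the tx lock to close the race
 * with a concurrent bnx2_start_xmit() stopping the queue.
 */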
2683 __netif_tx_lock(txq, smp_processor_id());
2684 if ((netif_tx_queue_stopped(txq)) &&
2685 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2686 netif_tx_wake_queue(txq);
2687 __netif_tx_unlock(txq);
2694 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2695 struct sk_buff *skb, int count)
2697 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2698 struct rx_bd *cons_bd, *prod_bd;
2701 u16 cons = rxr->rx_pg_cons;
2703 cons_rx_pg = &rxr->rx_pg_ring[cons];
2705 /* The caller was unable to allocate a new page to replace the
2706 * last one in the frags array, so we need to recycle that page
2707 * and then free the skb.
2711 struct skb_shared_info *shinfo;
2713 shinfo = skb_shinfo(skb);
2715 page = shinfo->frags[shinfo->nr_frags].page;
2716 shinfo->frags[shinfo->nr_frags].page = NULL;
2718 cons_rx_pg->page = page;
2722 hw_prod = rxr->rx_pg_prod;
2724 for (i = 0; i < count; i++) {
2725 prod = RX_PG_RING_IDX(hw_prod);
2727 prod_rx_pg = &rxr->rx_pg_ring[prod];
2728 cons_rx_pg = &rxr->rx_pg_ring[cons];
2729 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2730 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2733 prod_rx_pg->page = cons_rx_pg->page;
2734 cons_rx_pg->page = NULL;
2735 pci_unmap_addr_set(prod_rx_pg, mapping,
2736 pci_unmap_addr(cons_rx_pg, mapping));
2738 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2739 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2742 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2743 hw_prod = NEXT_RX_BD(hw_prod);
2745 rxr->rx_pg_prod = hw_prod;
2746 rxr->rx_pg_cons = cons;
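/* Recycle an rx buffer in place: move the consumer slot's skb and
 * DMA mapping to the producer slot so no fresh allocation is needed.
 */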
2750 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2751 struct sk_buff *skb, u16 cons, u16 prod)
2753 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2754 struct rx_bd *cons_bd, *prod_bd;
2756 cons_rx_buf = &rxr->rx_buf_ring[cons];
2757 prod_rx_buf = &rxr->rx_buf_ring[prod];
2759 pci_dma_sync_single_for_device(bp->pdev,
2760 pci_unmap_addr(cons_rx_buf, mapping),
2761 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2763 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2765 prod_rx_buf->skb = skb;
2770 pci_unmap_addr_set(prod_rx_buf, mapping,
2771 pci_unmap_addr(cons_rx_buf, mapping));
2773 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2774 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2775 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2776 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
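/* ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits.
 */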
2780 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2781 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2785 u16 prod = ring_idx & 0xffff;
2787 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2788 if (unlikely(err)) {
2789 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2791 unsigned int raw_len = len + 4;
2792 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2794 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2799 skb_reserve(skb, BNX2_RX_OFFSET);
2800 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2801 PCI_DMA_FROMDEVICE);
2807 unsigned int i, frag_len, frag_size, pages;
2808 struct sw_pg *rx_pg;
2809 u16 pg_cons = rxr->rx_pg_cons;
2810 u16 pg_prod = rxr->rx_pg_prod;
2812 frag_size = len + 4 - hdr_len;
2813 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2814 skb_put(skb, hdr_len);
2816 for (i = 0; i < pages; i++) {
2817 dma_addr_t mapping_old;
2819 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2820 if (unlikely(frag_len <= 4)) {
2821 unsigned int tail = 4 - frag_len;
2823 rxr->rx_pg_cons = pg_cons;
2824 rxr->rx_pg_prod = pg_prod;
2825 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2832 &skb_shinfo(skb)->frags[i - 1];
2834 skb->data_len -= tail;
2835 skb->truesize -= tail;
2839 rx_pg = &rxr->rx_pg_ring[pg_cons];
2841 /* Don't unmap yet. If we're unable to allocate a new
2842 * page, we need to recycle the page and the DMA addr.
2844 mapping_old = pci_unmap_addr(rx_pg, mapping);
2848 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2851 err = bnx2_alloc_rx_page(bp, rxr,
2852 RX_PG_RING_IDX(pg_prod));
2853 if (unlikely(err)) {
2854 rxr->rx_pg_cons = pg_cons;
2855 rxr->rx_pg_prod = pg_prod;
2856 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
2861 pci_unmap_page(bp->pdev, mapping_old,
2862 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2864 frag_size -= frag_len;
2865 skb->data_len += frag_len;
2866 skb->truesize += frag_len;
2867 skb->len += frag_len;
2869 pg_prod = NEXT_RX_BD(pg_prod);
2870 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2872 rxr->rx_pg_prod = pg_prod;
2873 rxr->rx_pg_cons = pg_cons;
2879 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2883 /* Tell compiler that status block fields can change. */
2885 cons = *bnapi->hw_rx_cons_ptr;
2887 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2893 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2895 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2896 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2897 struct l2_fhdr *rx_hdr;
2898 int rx_pkt = 0, pg_ring_used = 0;
2900 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2901 sw_cons = rxr->rx_cons;
2902 sw_prod = rxr->rx_prod;
2904 /* Memory barrier necessary as speculative reads of the rx
2905 * buffer can be ahead of the index in the status block
2908 while (sw_cons != hw_cons) {
2909 unsigned int len, hdr_len;
2911 struct sw_bd *rx_buf;
2912 struct sk_buff *skb;
2913 dma_addr_t dma_addr;
2915 int hw_vlan __maybe_unused = 0;
2917 sw_ring_cons = RX_RING_IDX(sw_cons);
2918 sw_ring_prod = RX_RING_IDX(sw_prod);
2920 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2925 dma_addr = pci_unmap_addr(rx_buf, mapping);
2927 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2928 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2929 PCI_DMA_FROMDEVICE);
2931 rx_hdr = (struct l2_fhdr *) skb->data;
2932 len = rx_hdr->l2_fhdr_pkt_len;
2933 status = rx_hdr->l2_fhdr_status;
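/* Frames above the jumbo threshold are split: the first hdr_len
 * bytes stay in the skb and the remainder lands in page-ring pages.
 */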
2936 if (status & L2_FHDR_STATUS_SPLIT) {
2937 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2939 } else if (len > bp->rx_jumbo_thresh) {
2940 hdr_len = bp->rx_jumbo_thresh;
2944 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
2945 L2_FHDR_ERRORS_PHY_DECODE |
2946 L2_FHDR_ERRORS_ALIGNMENT |
2947 L2_FHDR_ERRORS_TOO_SHORT |
2948 L2_FHDR_ERRORS_GIANT_FRAME))) {
2950 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2955 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
2957 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2964 if (len <= bp->rx_copy_thresh) {
2965 struct sk_buff *new_skb;
2967 new_skb = netdev_alloc_skb(bp->dev, len + 6);
2968 if (new_skb == NULL) {
2969 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2975 skb_copy_from_linear_data_offset(skb,
2977 new_skb->data, len + 6);
2978 skb_reserve(new_skb, 6);
2979 skb_put(new_skb, len);
2981 bnx2_reuse_rx_skb(bp, rxr, skb,
2982 sw_ring_cons, sw_ring_prod);
2985 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2986 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2989 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
2990 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
2991 vtag = rx_hdr->l2_fhdr_vlan_tag;
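/* No VLAN acceleration: shift the MAC header down and rebuild an
 * 802.1Q header in place so the stack sees a tagged frame.
 */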
2998 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3001 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3002 ve->h_vlan_proto = htons(ETH_P_8021Q);
3003 ve->h_vlan_TCI = htons(vtag);
3008 skb->protocol = eth_type_trans(skb, bp->dev);
3010 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3011 (ntohs(skb->protocol) != 0x8100)) {
3018 skb->ip_summed = CHECKSUM_NONE;
3020 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3021 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3023 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3024 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3025 skb->ip_summed = CHECKSUM_UNNECESSARY;
3028 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3032 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3035 netif_receive_skb(skb);
3040 sw_cons = NEXT_RX_BD(sw_cons);
3041 sw_prod = NEXT_RX_BD(sw_prod);
3043 if (rx_pkt == budget)
3046 /* Refresh hw_cons to see if there is new work */
3047 if (sw_cons == hw_cons) {
3048 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3052 rxr->rx_cons = sw_cons;
3053 rxr->rx_prod = sw_prod;
3056 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3058 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3060 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3068 /* MSI ISR - The only difference between this and the INTx ISR
3069 * is that the MSI interrupt is always serviced.
3072 bnx2_msi(int irq, void *dev_instance)
3074 struct bnx2_napi *bnapi = dev_instance;
3075 struct bnx2 *bp = bnapi->bp;
3077 prefetch(bnapi->status_blk.msi);
3078 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3079 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3080 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3082 /* Return here if interrupt is disabled. */
3083 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3086 napi_schedule(&bnapi->napi);
3092 bnx2_msi_1shot(int irq, void *dev_instance)
3094 struct bnx2_napi *bnapi = dev_instance;
3095 struct bnx2 *bp = bnapi->bp;
3097 prefetch(bnapi->status_blk.msi);
3099 /* Return here if interrupt is disabled. */
3100 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3103 napi_schedule(&bnapi->napi);
3109 bnx2_interrupt(int irq, void *dev_instance)
3111 struct bnx2_napi *bnapi = dev_instance;
3112 struct bnx2 *bp = bnapi->bp;
3113 struct status_block *sblk = bnapi->status_blk.msi;
3115 /* When using INTx, it is possible for the interrupt to arrive
3116 * at the CPU before the status block posted prior to the
3117 * interrupt. Reading a register will flush the status block.
3118 * When using MSI, the MSI message will always complete after
3119 * the status block write.
3121 if ((sblk->status_idx == bnapi->last_status_idx) &&
3122 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3123 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3126 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3127 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3128 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3130 /* Read back to deassert IRQ immediately to avoid too many
3131 * spurious interrupts.
3133 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3135 /* Return here if interrupt is shared and is disabled. */
3136 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3139 if (napi_schedule_prep(&bnapi->napi)) {
3140 bnapi->last_status_idx = sblk->status_idx;
3141 __napi_schedule(&bnapi->napi);
3148 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3150 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3151 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3153 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3154 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3159 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3160 STATUS_ATTN_BITS_TIMER_ABORT)
3163 bnx2_has_work(struct bnx2_napi *bnapi)
3165 struct status_block *sblk = bnapi->status_blk.msi;
3167 if (bnx2_has_fast_work(bnapi))
3170 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3171 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
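/* Workaround for lost MSIs: if work is pending but the status index
 * has not moved since the last idle check, bounce the MSI enable bit
 * and service the interrupt by hand.
 */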
3178 bnx2_chk_missed_msi(struct bnx2 *bp)
3180 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3183 if (bnx2_has_work(bnapi)) {
3184 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3185 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3188 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3189 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3190 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3191 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3192 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3196 bp->idle_chk_status_idx = bnapi->last_status_idx;
3199 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3201 struct status_block *sblk = bnapi->status_blk.msi;
3202 u32 status_attn_bits = sblk->status_attn_bits;
3203 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3205 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3206 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3208 bnx2_phy_int(bp, bnapi);
3210 /* This is needed to take care of transient status
3211 * during link changes.
3213 REG_WR(bp, BNX2_HC_COMMAND,
3214 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3215 REG_RD(bp, BNX2_HC_COMMAND);
3219 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3220 int work_done, int budget)
3222 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3223 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3225 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3226 bnx2_tx_int(bp, bnapi, 0);
3228 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3229 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3234 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3236 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3237 struct bnx2 *bp = bnapi->bp;
3239 struct status_block_msix *sblk = bnapi->status_blk.msix;
3242 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3243 if (unlikely(work_done >= budget))
3246 bnapi->last_status_idx = sblk->status_idx;
3247 /* status idx must be read before checking for more work. */
3249 if (likely(!bnx2_has_fast_work(bnapi))) {
3251 napi_complete(napi);
3252 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3253 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3254 bnapi->last_status_idx);
3261 static int bnx2_poll(struct napi_struct *napi, int budget)
3263 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3264 struct bnx2 *bp = bnapi->bp;
3266 struct status_block *sblk = bnapi->status_blk.msi;
3269 bnx2_poll_link(bp, bnapi);
3271 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3273 /* bnapi->last_status_idx is used below to tell the hw how
3274 * much work has been processed, so we must read it before
3275 * checking for more work.
3277 bnapi->last_status_idx = sblk->status_idx;
3279 if (unlikely(work_done >= budget))
3283 if (likely(!bnx2_has_work(bnapi))) {
3284 napi_complete(napi);
3285 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3286 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3287 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3288 bnapi->last_status_idx);
3291 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3292 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3293 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3294 bnapi->last_status_idx);
3296 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3297 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3298 bnapi->last_status_idx);
3306 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3307 * from set_multicast.
3310 bnx2_set_rx_mode(struct net_device *dev)
3312 struct bnx2 *bp = netdev_priv(dev);
3313 u32 rx_mode, sort_mode;
3314 struct netdev_hw_addr *ha;
3317 if (!netif_running(dev))
3320 spin_lock_bh(&bp->phy_lock);
3322 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3323 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3324 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3326 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3327 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3329 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3330 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3332 if (dev->flags & IFF_PROMISC) {
3333 /* Promiscuous mode. */
3334 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3335 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3336 BNX2_RPM_SORT_USER0_PROM_VLAN;
3338 else if (dev->flags & IFF_ALLMULTI) {
3339 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3340 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3343 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3346 /* Accept one or more multicast(s). */
3347 struct dev_mc_list *mclist;
3348 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3353 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3355 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3356 i++, mclist = mclist->next) {
3358 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3360 regidx = (bit & 0xe0) >> 5;
3362 mc_filter[regidx] |= (1 << bit);
3365 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3366 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3370 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3373 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
3374 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3375 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3376 BNX2_RPM_SORT_USER0_PROM_VLAN;
3377 } else if (!(dev->flags & IFF_PROMISC)) {
3378 /* Add all entries into the match filter list */
3380 list_for_each_entry(ha, &dev->uc_list, list) {
3381 bnx2_set_mac_addr(bp, ha->addr,
3382 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3384 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3390 if (rx_mode != bp->rx_mode) {
3391 bp->rx_mode = rx_mode;
3392 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3395 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3396 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3397 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3399 spin_unlock_bh(&bp->phy_lock);
3402 static int __devinit
3403 check_fw_section(const struct firmware *fw,
3404 const struct bnx2_fw_file_section *section,
3405 u32 alignment, bool non_empty)
3407 u32 offset = be32_to_cpu(section->offset);
3408 u32 len = be32_to_cpu(section->len);
3410 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3412 if ((non_empty && len == 0) || len > fw->size - offset ||
3413 len & (alignment - 1))
3418 static int __devinit
3419 check_mips_fw_entry(const struct firmware *fw,
3420 const struct bnx2_mips_fw_file_entry *entry)
3422 if (check_fw_section(fw, &entry->text, 4, true) ||
3423 check_fw_section(fw, &entry->data, 4, false) ||
3424 check_fw_section(fw, &entry->rodata, 4, false))
3429 static int __devinit
3430 bnx2_request_firmware(struct bnx2 *bp)
3432 const char *mips_fw_file, *rv2p_fw_file;
3433 const struct bnx2_mips_fw_file *mips_fw;
3434 const struct bnx2_rv2p_fw_file *rv2p_fw;
3437 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3438 mips_fw_file = FW_MIPS_FILE_09;
3439 rv2p_fw_file = FW_RV2P_FILE_09;
3441 mips_fw_file = FW_MIPS_FILE_06;
3442 rv2p_fw_file = FW_RV2P_FILE_06;
3445 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3447 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3452 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3454 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3458 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3459 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3460 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3461 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3462 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3463 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3464 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3465 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3466 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3470 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3471 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3472 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3473 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3482 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3485 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3486 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3487 rv2p_code |= RV2P_BD_PAGE_SIZE;
3494 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3495 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3497 u32 rv2p_code_len, file_offset;
3502 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3503 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3505 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3507 if (rv2p_proc == RV2P_PROC1) {
3508 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3509 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3511 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3512 addr = BNX2_RV2P_PROC2_ADDR_CMD;
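/* Each RV2P instruction is 64 bits wide and is loaded as a high/low
 * pair of 32-bit writes, then latched at instruction index i / 8.
 */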
3515 for (i = 0; i < rv2p_code_len; i += 8) {
3516 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3518 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3521 val = (i / 8) | cmd;
3522 REG_WR(bp, addr, val);
3525 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
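/* Apply the fixup table: each entry names a 32-bit word in the
 * image; the containing instruction is re-read, patched (e.g. with
 * the BD page size), and written back at index loc / 2.
 */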
3526 for (i = 0; i < 8; i++) {
3529 loc = be32_to_cpu(fw_entry->fixup[i]);
3530 if (loc && ((loc * 4) < rv2p_code_len)) {
3531 code = be32_to_cpu(*(rv2p_code + loc - 1));
3532 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3533 code = be32_to_cpu(*(rv2p_code + loc));
3534 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3535 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3537 val = (loc / 2) | cmd;
3538 REG_WR(bp, addr, val);
3542 /* Reset the processor, un-stall is done later. */
3543 if (rv2p_proc == RV2P_PROC1) {
3544 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3547 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3554 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3555 const struct bnx2_mips_fw_file_entry *fw_entry)
3557 u32 addr, len, file_offset;
3563 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3564 val |= cpu_reg->mode_value_halt;
3565 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3566 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3568 /* Load the Text area. */
3569 addr = be32_to_cpu(fw_entry->text.addr);
3570 len = be32_to_cpu(fw_entry->text.len);
3571 file_offset = be32_to_cpu(fw_entry->text.offset);
3572 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3574 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3578 for (j = 0; j < (len / 4); j++, offset += 4)
3579 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3582 /* Load the Data area. */
3583 addr = be32_to_cpu(fw_entry->data.addr);
3584 len = be32_to_cpu(fw_entry->data.len);
3585 file_offset = be32_to_cpu(fw_entry->data.offset);
3586 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3588 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3592 for (j = 0; j < (len / 4); j++, offset += 4)
3593 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3596 /* Load the Read-Only area. */
3597 addr = be32_to_cpu(fw_entry->rodata.addr);
3598 len = be32_to_cpu(fw_entry->rodata.len);
3599 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3600 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3602 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3606 for (j = 0; j < (len / 4); j++, offset += 4)
3607 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3610 /* Clear the pre-fetch instruction. */
3611 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3613 val = be32_to_cpu(fw_entry->start_addr);
3614 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3616 /* Start the CPU. */
3617 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3618 val &= ~cpu_reg->mode_value_halt;
3619 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3620 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3626 bnx2_init_cpus(struct bnx2 *bp)
3628 const struct bnx2_mips_fw_file *mips_fw =
3629 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3630 const struct bnx2_rv2p_fw_file *rv2p_fw =
3631 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3634 /* Initialize the RV2P processor. */
3635 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3636 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3638 /* Initialize the RX Processor. */
3639 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3643 /* Initialize the TX Processor. */
3644 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3648 /* Initialize the TX Patch-up Processor. */
3649 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3653 /* Initialize the Completion Processor. */
3654 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3658 /* Initialize the Command Processor. */
3659 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3666 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3670 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3676 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3677 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3678 PCI_PM_CTRL_PME_STATUS);
3680 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3681 /* delay required during transition out of D3hot */
3684 val = REG_RD(bp, BNX2_EMAC_MODE);
3685 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3686 val &= ~BNX2_EMAC_MODE_MPKT;
3687 REG_WR(bp, BNX2_EMAC_MODE, val);
3689 val = REG_RD(bp, BNX2_RPM_CONFIG);
3690 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3691 REG_WR(bp, BNX2_RPM_CONFIG, val);
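/* For wake-on-LAN, force a renegotiation limited to 10/100
 * (gigabit is presumably unavailable at WOL power levels); the
 * caller's autoneg settings are restored below.
 */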
3702 autoneg = bp->autoneg;
3703 advertising = bp->advertising;
3705 if (bp->phy_port == PORT_TP) {
3706 bp->autoneg = AUTONEG_SPEED;
3707 bp->advertising = ADVERTISED_10baseT_Half |
3708 ADVERTISED_10baseT_Full |
3709 ADVERTISED_100baseT_Half |
3710 ADVERTISED_100baseT_Full |
3714 spin_lock_bh(&bp->phy_lock);
3715 bnx2_setup_phy(bp, bp->phy_port);
3716 spin_unlock_bh(&bp->phy_lock);
3718 bp->autoneg = autoneg;
3719 bp->advertising = advertising;
3721 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3723 val = REG_RD(bp, BNX2_EMAC_MODE);
3725 /* Enable port mode. */
3726 val &= ~BNX2_EMAC_MODE_PORT;
3727 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3728 BNX2_EMAC_MODE_ACPI_RCVD |
3729 BNX2_EMAC_MODE_MPKT;
3730 if (bp->phy_port == PORT_TP)
3731 val |= BNX2_EMAC_MODE_PORT_MII;
3733 val |= BNX2_EMAC_MODE_PORT_GMII;
3734 if (bp->line_speed == SPEED_2500)
3735 val |= BNX2_EMAC_MODE_25G_MODE;
3738 REG_WR(bp, BNX2_EMAC_MODE, val);
3740 /* receive all multicast */
3741 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3742 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3745 REG_WR(bp, BNX2_EMAC_RX_MODE,
3746 BNX2_EMAC_RX_MODE_SORT_MODE);
3748 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3749 BNX2_RPM_SORT_USER0_MC_EN;
3750 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3751 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3752 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3753 BNX2_RPM_SORT_USER0_ENA);
3755 /* Need to enable EMAC and RPM for WOL. */
3756 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3757 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3758 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3759 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3761 val = REG_RD(bp, BNX2_RPM_CONFIG);
3762 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3763 REG_WR(bp, BNX2_RPM_CONFIG, val);
3765 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3768 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3771 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3772 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3775 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3776 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3777 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3786 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3788 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3791 /* No more memory access after this point until
3792 * device is brought back to D0.
3804 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3809 /* Request access to the flash interface. */
3810 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3811 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3812 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3813 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3819 if (j >= NVRAM_TIMEOUT_COUNT)
3826 bnx2_release_nvram_lock(struct bnx2 *bp)
3831 /* Relinquish nvram interface. */
3832 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3834 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3835 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3836 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3842 if (j >= NVRAM_TIMEOUT_COUNT)
3850 bnx2_enable_nvram_write(struct bnx2 *bp)
3854 val = REG_RD(bp, BNX2_MISC_CFG);
3855 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3857 if (bp->flash_info->flags & BNX2_NV_WREN) {
3860 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3861 REG_WR(bp, BNX2_NVM_COMMAND,
3862 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3864 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3867 val = REG_RD(bp, BNX2_NVM_COMMAND);
3868 if (val & BNX2_NVM_COMMAND_DONE)
3872 if (j >= NVRAM_TIMEOUT_COUNT)
3879 bnx2_disable_nvram_write(struct bnx2 *bp)
3883 val = REG_RD(bp, BNX2_MISC_CFG);
3884 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3889 bnx2_enable_nvram_access(struct bnx2 *bp)
3893 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3894 /* Enable both bits, even on read. */
3895 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3896 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3900 bnx2_disable_nvram_access(struct bnx2 *bp)
3904 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3905 /* Disable both bits, even after read. */
3906 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3907 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3908 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3912 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3917 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3918 /* Buffered flash, no erase needed */
3921 /* Build an erase command */
3922 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3923 BNX2_NVM_COMMAND_DOIT;
3925 /* Need to clear DONE bit separately. */
3926 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3928 /* Address of the NVRAM page to erase. */
3929 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3931 /* Issue an erase command. */
3932 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3934 /* Wait for completion. */
3935 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3940 val = REG_RD(bp, BNX2_NVM_COMMAND);
3941 if (val & BNX2_NVM_COMMAND_DONE)
3945 if (j >= NVRAM_TIMEOUT_COUNT)
3952 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3957 /* Build the command word. */
3958 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3960 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3961 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3962 offset = ((offset / bp->flash_info->page_size) <<
3963 bp->flash_info->page_bits) +
3964 (offset % bp->flash_info->page_size);
3967 /* Need to clear DONE bit separately. */
3968 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3970 /* Address of the NVRAM to read from. */
3971 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3973 /* Issue a read command. */
3974 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3976 /* Wait for completion. */
3977 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3982 val = REG_RD(bp, BNX2_NVM_COMMAND);
3983 if (val & BNX2_NVM_COMMAND_DONE) {
3984 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3985 memcpy(ret_val, &v, 4);
3989 if (j >= NVRAM_TIMEOUT_COUNT)
3997 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4003 /* Build the command word. */
4004 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4006 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4007 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4008 offset = ((offset / bp->flash_info->page_size) <<
4009 bp->flash_info->page_bits) +
4010 (offset % bp->flash_info->page_size);
4013 /* Need to clear DONE bit separately. */
4014 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4016 memcpy(&val32, val, 4);
4018 /* Write the data. */
4019 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4021 /* Address of the NVRAM to write to. */
4022 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4024 /* Issue the write command. */
4025 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4027 /* Wait for completion. */
4028 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4031 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4034 if (j >= NVRAM_TIMEOUT_COUNT)
4041 bnx2_init_nvram(struct bnx2 *bp)
4044 int j, entry_count, rc = 0;
4045 struct flash_spec *flash;
4047 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4048 bp->flash_info = &flash_5709;
4049 goto get_flash_size;
4052 /* Determine the selected interface. */
4053 val = REG_RD(bp, BNX2_NVM_CFG1);
4055 entry_count = ARRAY_SIZE(flash_table);
4057 if (val & 0x40000000) {
4059 /* Flash interface has been reconfigured */
4060 for (j = 0, flash = &flash_table[0]; j < entry_count;
4062 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4063 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4064 bp->flash_info = flash;
4071 /* Not yet reconfigured */
4073 if (val & (1 << 23))
4074 mask = FLASH_BACKUP_STRAP_MASK;
4076 mask = FLASH_STRAP_MASK;
4078 for (j = 0, flash = &flash_table[0]; j < entry_count;
4081 if ((val & mask) == (flash->strapping & mask)) {
4082 bp->flash_info = flash;
4084 /* Request access to the flash interface. */
4085 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4088 /* Enable access to flash interface */
4089 bnx2_enable_nvram_access(bp);
4091 /* Reconfigure the flash interface */
4092 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4093 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4094 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4095 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4097 /* Disable access to flash interface */
4098 bnx2_disable_nvram_access(bp);
4099 bnx2_release_nvram_lock(bp);
4104 } /* if (val & 0x40000000) */
4106 if (j == entry_count) {
4107 bp->flash_info = NULL;
4108 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
4113 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4114 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4116 bp->flash_size = val;
4118 bp->flash_size = bp->flash_info->total_size;
4124 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4128 u32 cmd_flags, offset32, len32, extra;
4133 /* Request access to the flash interface. */
4134 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4137 /* Enable access to flash interface */
4138 bnx2_enable_nvram_access(bp);
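/* NVRAM is accessed a dword at a time, so an unaligned read is
 * split into an aligned leading dword, whole middle dwords, and an
 * aligned trailing dword, copying out only the requested bytes.
 */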
4151 pre_len = 4 - (offset & 3);
4153 if (pre_len >= len32) {
4155 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4156 BNX2_NVM_COMMAND_LAST;
4159 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4162 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4167 memcpy(ret_buf, buf + (offset & 3), pre_len);
4174 extra = 4 - (len32 & 3);
4175 len32 = (len32 + 4) & ~3;
4182 cmd_flags = BNX2_NVM_COMMAND_LAST;
4184 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4185 BNX2_NVM_COMMAND_LAST;
4187 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4189 memcpy(ret_buf, buf, 4 - extra);
4191 else if (len32 > 0) {
4194 /* Read the first word. */
4198 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4200 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4202 /* Advance to the next dword. */
4207 while (len32 > 4 && rc == 0) {
4208 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4210 /* Advance to the next dword. */
4219 cmd_flags = BNX2_NVM_COMMAND_LAST;
4220 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4222 memcpy(ret_buf, buf, 4 - extra);
4225 /* Disable access to flash interface */
4226 bnx2_disable_nvram_access(bp);
4228 bnx2_release_nvram_lock(bp);
4234 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4237 u32 written, offset32, len32;
4238 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4240 int align_start, align_end;
4245 align_start = align_end = 0;
4247 if ((align_start = (offset32 & 3))) {
4249 len32 += align_start;
4252 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4257 align_end = 4 - (len32 & 3);
4259 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4263 if (align_start || align_end) {
4264 align_buf = kmalloc(len32, GFP_KERNEL);
4265 if (align_buf == NULL)
4268 memcpy(align_buf, start, 4);
4271 memcpy(align_buf + len32 - 4, end, 4);
4273 memcpy(align_buf + align_start, data_buf, buf_size);
4277 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4278 flash_buffer = kmalloc(264, GFP_KERNEL);
4279 if (flash_buffer == NULL) {
4281 goto nvram_write_end;
4286 while ((written < len32) && (rc == 0)) {
4287 u32 page_start, page_end, data_start, data_end;
4288 u32 addr, cmd_flags;
4291 /* Find the page_start addr */
4292 page_start = offset32 + written;
4293 page_start -= (page_start % bp->flash_info->page_size);
4294 /* Find the page_end addr */
4295 page_end = page_start + bp->flash_info->page_size;
4296 /* Find the data_start addr */
4297 data_start = (written == 0) ? offset32 : page_start;
4298 /* Find the data_end addr */
4299 data_end = (page_end > offset32 + len32) ?
4300 (offset32 + len32) : page_end;
4302 /* Request access to the flash interface. */
4303 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4304 goto nvram_write_end;
4306 /* Enable access to flash interface */
4307 bnx2_enable_nvram_access(bp);
4309 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4310 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4313 /* Read the whole page into the buffer
4314 * (non-buffered flash only) */
4315 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4316 if (j == (bp->flash_info->page_size - 4)) {
4317 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4319 rc = bnx2_nvram_read_dword(bp,
4325 goto nvram_write_end;
4331 /* Enable writes to flash interface (unlock write-protect) */
4332 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4333 goto nvram_write_end;
4335 /* Loop to write back the buffer data from page_start to data_start. */
4338 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4339 /* Erase the page */
4340 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4341 goto nvram_write_end;
4343 /* Re-enable write access for the actual write. */
4344 bnx2_enable_nvram_write(bp);
4346 for (addr = page_start; addr < data_start;
4347 addr += 4, i += 4) {
4349 rc = bnx2_nvram_write_dword(bp, addr,
4350 &flash_buffer[i], cmd_flags);
4353 goto nvram_write_end;
4359 /* Loop to write the new data from data_start to data_end */
4360 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4361 if ((addr == page_end - 4) ||
4362 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4363 (addr == data_end - 4))) {
4365 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4367 rc = bnx2_nvram_write_dword(bp, addr, buf,
4371 goto nvram_write_end;
4377 /* Loop to write back the buffer data from data_end to page_end. */
4379 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4380 for (addr = data_end; addr < page_end;
4381 addr += 4, i += 4) {
4383 if (addr == page_end - 4) {
4384 cmd_flags = BNX2_NVM_COMMAND_LAST;
4386 rc = bnx2_nvram_write_dword(bp, addr,
4387 &flash_buffer[i], cmd_flags);
4390 goto nvram_write_end;
4396 /* Disable writes to flash interface (lock write-protect) */
4397 bnx2_disable_nvram_write(bp);
4399 /* Disable access to flash interface */
4400 bnx2_disable_nvram_access(bp);
4401 bnx2_release_nvram_lock(bp);
4403 /* Increment written */
4404 written += data_end - data_start;
4408 kfree(flash_buffer);
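/* Read the firmware capability mailbox and acknowledge the optional
 * features the driver will use (VLAN tag keeping, remote PHY).
 */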
4414 bnx2_init_fw_cap(struct bnx2 *bp)
4418 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4419 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4421 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4422 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4424 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4425 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4428 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4429 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4430 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4433 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4434 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4437 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4439 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4440 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4441 bp->phy_port = PORT_FIBRE;
4443 bp->phy_port = PORT_TP;
4445 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4446 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4449 if (netif_running(bp->dev) && sig)
4450 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
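/* Map the MSI-X vector table and PBA through separate GRC windows. */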
4454 bnx2_setup_msix_tbl(struct bnx2 *bp)
4456 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4458 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4459 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4463 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4469 /* Wait for the current PCI transaction to complete before
4470 * issuing a reset. */
4471 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4472 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4473 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4474 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4475 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4476 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4479 /* Wait for the firmware to tell us it is ok to issue a reset. */
4480 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4482 /* Deposit a driver reset signature so the firmware knows that
4483 * this is a soft reset. */
4484 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4485 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4487 /* Do a dummy read to force the chip to complete all pending transactions
4488 * before we issue a reset. */
4489 val = REG_RD(bp, BNX2_MISC_ID);
4491 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4492 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4493 REG_RD(bp, BNX2_MISC_COMMAND);
4496 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4497 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4499 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4502 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4503 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4504 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4507 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4509 /* Reading back any register after chip reset will hang the
4510 * bus on 5706 A0 and A1. The msleep below provides plenty
4511 * of margin for write posting.
4513 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4514 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4517 /* Reset takes approximately 30 usec */
4518 for (i = 0; i < 10; i++) {
4519 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4520 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4521 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4526 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4527 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4528 printk(KERN_ERR PFX "Chip reset did not complete\n");
4533 /* Make sure byte swapping is properly configured. */
4534 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4535 if (val != 0x01020304) {
4536 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4540 /* Wait for the firmware to finish its initialization. */
4541 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4545 spin_lock_bh(&bp->phy_lock);
4546 old_port = bp->phy_port;
4547 bnx2_init_fw_cap(bp);
4548 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4549 old_port != bp->phy_port)
4550 bnx2_set_default_remote_link(bp);
4551 spin_unlock_bh(&bp->phy_lock);
4553 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4554 /* Adjust the voltage regulator two steps lower. The default
4555 * of this register is 0x0000000e. */
4556 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4558 /* Remove bad rbuf memory from the free pool. */
4559 rc = bnx2_alloc_bad_rbuf(bp);
4562 if (bp->flags & BNX2_FLAG_USING_MSIX)
4563 bnx2_setup_msix_tbl(bp);
4569 bnx2_init_chip(struct bnx2 *bp)
4574 /* Make sure the interrupt is not active. */
4575 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4577 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4578 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4580 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4582 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4583 DMA_READ_CHANS << 12 |
4584 DMA_WRITE_CHANS << 16;
4586 val |= (0x2 << 20) | (1 << 11);
4588 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4591 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4592 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4593 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4595 REG_WR(bp, BNX2_DMA_CONFIG, val);
4597 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4598 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4599 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4600 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4603 if (bp->flags & BNX2_FLAG_PCIX) {
4606 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4608 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4609 val16 & ~PCI_X_CMD_ERO);
4612 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4613 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4614 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4615 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4617 /* Initialize context mapping and zero out the quick contexts. The
4618 * context block must have already been enabled. */
4619 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4620 rc = bnx2_init_5709_context(bp);
4624 bnx2_init_context(bp);
4626 if ((rc = bnx2_init_cpus(bp)) != 0)
4629 bnx2_init_nvram(bp);
4631 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4633 val = REG_RD(bp, BNX2_MQ_CONFIG);
4634 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4635 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4636 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4637 val |= BNX2_MQ_CONFIG_HALT_DIS;
4639 REG_WR(bp, BNX2_MQ_CONFIG, val);
4641 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4642 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4643 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4645 val = (BCM_PAGE_BITS - 8) << 24;
4646 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4648 /* Configure page size. */
4649 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4650 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4651 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4652 REG_WR(bp, BNX2_TBDR_CONFIG, val);
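/* Seed the transmit backoff generator from the MAC address,
 * presumably so backoff patterns differ between NICs.
 */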
4654 val = bp->mac_addr[0] +
4655 (bp->mac_addr[1] << 8) +
4656 (bp->mac_addr[2] << 16) +
4658 (bp->mac_addr[4] << 8) +
4659 (bp->mac_addr[5] << 16);
4660 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4662 /* Program the MTU. Also include 4 bytes for CRC32. */
4664 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4665 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4666 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4667 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4672 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4673 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4674 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4676 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4677 bp->bnx2_napi[i].last_status_idx = 0;
4679 bp->idle_chk_status_idx = 0xffff;
4681 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4683 /* Set up how to generate a link change interrupt. */
4684 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4686 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4687 (u64) bp->status_blk_mapping & 0xffffffff);
4688 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4690 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4691 (u64) bp->stats_blk_mapping & 0xffffffff);
4692 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4693 (u64) bp->stats_blk_mapping >> 32);
4695 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4696 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4698 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4699 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4701 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4702 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4704 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4706 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4708 REG_WR(bp, BNX2_HC_COM_TICKS,
4709 (bp->com_ticks_int << 16) | bp->com_ticks);
4711 REG_WR(bp, BNX2_HC_CMD_TICKS,
4712 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4714 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4715 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4717 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4718 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4720 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4721 val = BNX2_HC_CONFIG_COLLECT_STATS;
4723 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4724 BNX2_HC_CONFIG_COLLECT_STATS;
4727 if (bp->irq_nvecs > 1) {
4728 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4729 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4731 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4734 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4735 val |= BNX2_HC_CONFIG_ONE_SHOT;
4737 REG_WR(bp, BNX2_HC_CONFIG, val);
4739 for (i = 1; i < bp->irq_nvecs; i++) {
4740 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4741 BNX2_HC_SB_CONFIG_1;
4744 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4745 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4746 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4748 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4749 (bp->tx_quick_cons_trip_int << 16) |
4750 bp->tx_quick_cons_trip);
4752 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4753 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4755 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4756 (bp->rx_quick_cons_trip_int << 16) |
4757 bp->rx_quick_cons_trip);
4759 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4760 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4763 /* Clear internal stats counters. */
4764 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4766 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4768 /* Initialize the receive filter. */
4769 bnx2_set_rx_mode(bp->dev);
4771 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4772 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4773 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4774 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4776 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4779 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4780 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4784 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4790 bnx2_clear_ring_states(struct bnx2 *bp)
4792 struct bnx2_napi *bnapi;
4793 struct bnx2_tx_ring_info *txr;
4794 struct bnx2_rx_ring_info *rxr;
4797 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4798 bnapi = &bp->bnx2_napi[i];
4799 txr = &bnapi->tx_ring;
4800 rxr = &bnapi->rx_ring;
4803 txr->hw_tx_cons = 0;
4804 rxr->rx_prod_bseq = 0;
4807 rxr->rx_pg_prod = 0;
4808 rxr->rx_pg_cons = 0;
4813 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4815 u32 val, offset0, offset1, offset2, offset3;
4816 u32 cid_addr = GET_CID_ADDR(cid);
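/* The 5709 uses a different set of L2 context offsets (the _XI
 * variants) from the older chips.
 */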
4818 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4819 offset0 = BNX2_L2CTX_TYPE_XI;
4820 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4821 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4822 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4824 offset0 = BNX2_L2CTX_TYPE;
4825 offset1 = BNX2_L2CTX_CMD_TYPE;
4826 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4827 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4829 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4830 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4832 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4833 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4835 val = (u64) txr->tx_desc_mapping >> 32;
4836 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4838 val = (u64) txr->tx_desc_mapping & 0xffffffff;
4839 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4843 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4847 struct bnx2_napi *bnapi;
4848 struct bnx2_tx_ring_info *txr;
4850 bnapi = &bp->bnx2_napi[ring_num];
4851 txr = &bnapi->tx_ring;
4856 cid = TX_TSS_CID + ring_num - 1;
4858 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4860 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4862 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4863 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4866 txr->tx_prod_bseq = 0;
4868 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4869 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4871 bnx2_init_tx_context(bp, cid, txr);
4875 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4881 for (i = 0; i < num_rings; i++) {
4884 rxbd = &rx_ring[i][0];
4885 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4886 rxbd->rx_bd_len = buf_size;
4887 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4889 if (i == (num_rings - 1))
4893 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4894 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4899 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4902 u16 prod, ring_prod;
4903 u32 cid, rx_cid_addr, val;
4904 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4905 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4910 cid = RX_RSS_CID + ring_num - 1;
4912 rx_cid_addr = GET_CID_ADDR(cid);
4914 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4915 bp->rx_buf_use_size, bp->rx_max_ring);
4917 bnx2_init_rx_context(bp, cid);
4919 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4920 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4921 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4924 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4925 if (bp->rx_pg_ring_size) {
4926 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4927 rxr->rx_pg_desc_mapping,
4928 PAGE_SIZE, bp->rx_max_pg_ring);
4929 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4930 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4931 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4932 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
4934 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4935 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4937 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4938 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4940 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4941 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4944 val = (u64) rxr->rx_desc_mapping[0] >> 32;
4945 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4947 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4948 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4950 ring_prod = prod = rxr->rx_pg_prod;
4951 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4952 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4954 prod = NEXT_RX_BD(prod);
4955 ring_prod = RX_PG_RING_IDX(prod);
4957 rxr->rx_pg_prod = prod;
4959 ring_prod = prod = rxr->rx_prod;
4960 for (i = 0; i < bp->rx_ring_size; i++) {
4961 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4963 prod = NEXT_RX_BD(prod);
4964 ring_prod = RX_RING_IDX(prod);
4966 rxr->rx_prod = prod;
4968 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4969 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4970 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4972 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4973 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4975 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
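/* Bring up every TX and RX ring and, when running multiqueue, the
 * TSS/RSS steering configuration.
 */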
4979 bnx2_init_all_rings(struct bnx2 *bp)
4984 bnx2_clear_ring_states(bp);
4986 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4987 for (i = 0; i < bp->num_tx_rings; i++)
4988 bnx2_init_tx_ring(bp, i);
4990 if (bp->num_tx_rings > 1)
4991 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4994 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4995 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4997 for (i = 0; i < bp->num_rx_rings; i++)
4998 bnx2_init_rx_ring(bp, i);
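/* With multiple RX rings, build the RSS indirection table in RXP
 * scratch memory; four one-byte ring indices are packed into each
 * 32-bit word before it is written out.
 */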
5000 if (bp->num_rx_rings > 1) {
5002 u8 *tbl = (u8 *) &tbl_32;
5004 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5005 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5007 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5008 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5011 BNX2_RXP_SCRATCH_RSS_TBL + i,
5012 cpu_to_be32(tbl_32));
5015 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5016 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5018 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
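/* Return the number of ring pages (a power of two, capped at
 * max_size) needed to hold ring_size descriptors.
 */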
5023 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5025 u32 max, num_rings = 1;
5027 while (ring_size > MAX_RX_DESC_CNT) {
5028 ring_size -= MAX_RX_DESC_CNT;
5031 /* round to next power of 2 */
5033 while ((max & num_rings) == 0)
5036 if (num_rings != max)
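/* Size the RX buffers and rings for the current MTU.  When a full
 * frame plus overhead no longer fits in one page (and jumbo placement
 * is not broken on this chip), headers stay in the normal ring and
 * the rest of the frame is split into page-sized buffers on the page
 * ring.
 */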
5043 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5045 u32 rx_size, rx_space, jumbo_size;
5047 /* 8 for CRC and VLAN */
5048 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5050 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5051 sizeof(struct skb_shared_info);
5053 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5054 bp->rx_pg_ring_size = 0;
5055 bp->rx_max_pg_ring = 0;
5056 bp->rx_max_pg_ring_idx = 0;
5057 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5058 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5060 jumbo_size = size * pages;
5061 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5062 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5064 bp->rx_pg_ring_size = jumbo_size;
5065 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5067 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5068 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5069 bp->rx_copy_thresh = 0;
5072 bp->rx_buf_use_size = rx_size;
5074 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5075 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5076 bp->rx_ring_size = size;
5077 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5078 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5082 bnx2_free_tx_skbs(struct bnx2 *bp)
5086 for (i = 0; i < bp->num_tx_rings; i++) {
5087 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5088 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5091 if (txr->tx_buf_ring == NULL)
5094 for (j = 0; j < TX_DESC_CNT; ) {
5095 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5096 struct sk_buff *skb = tx_buf->skb;
5103 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5107 j += skb_shinfo(skb)->nr_frags + 1;
5114 bnx2_free_rx_skbs(struct bnx2 *bp)
5118 for (i = 0; i < bp->num_rx_rings; i++) {
5119 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5120 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5123 if (rxr->rx_buf_ring == NULL)
5126 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5127 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5128 struct sk_buff *skb = rx_buf->skb;
5133 pci_unmap_single(bp->pdev,
5134 pci_unmap_addr(rx_buf, mapping),
5135 bp->rx_buf_use_size,
5136 PCI_DMA_FROMDEVICE);
5142 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5143 bnx2_free_rx_page(bp, rxr, j);
5148 bnx2_free_skbs(struct bnx2 *bp)
5150 bnx2_free_tx_skbs(bp);
5151 bnx2_free_rx_skbs(bp);
5155 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5159 rc = bnx2_reset_chip(bp, reset_code);
5164 if ((rc = bnx2_init_chip(bp)) != 0)
5167 bnx2_init_all_rings(bp);
5172 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5176 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5179 spin_lock_bh(&bp->phy_lock);
5180 bnx2_init_phy(bp, reset_phy);
5182 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5183 bnx2_remote_phy_event(bp);
5184 spin_unlock_bh(&bp->phy_lock);
5189 bnx2_shutdown_chip(struct bnx2 *bp)
5193 if (bp->flags & BNX2_FLAG_NO_WOL)
5194 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5196 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5198 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5200 return bnx2_reset_chip(bp, reset_code);
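/* Register self-test.  Each table entry is { offset, flags, rw_mask,
 * ro_mask }: read/write bits must accept both 0 and 1 while read-only
 * bits must hold their saved value.  Entries flagged BNX2_FL_NOT_5709
 * are skipped on the 5709.
 */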
5204 bnx2_test_registers(struct bnx2 *bp)
5208 static const struct {
5211 #define BNX2_FL_NOT_5709 1
5215 { 0x006c, 0, 0x00000000, 0x0000003f },
5216 { 0x0090, 0, 0xffffffff, 0x00000000 },
5217 { 0x0094, 0, 0x00000000, 0x00000000 },
5219 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5220 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5221 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5222 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5223 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5224 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5225 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5226 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5227 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5229 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5230 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5231 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5232 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5233 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5234 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5236 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5237 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5238 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5240 { 0x1000, 0, 0x00000000, 0x00000001 },
5241 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5243 { 0x1408, 0, 0x01c00800, 0x00000000 },
5244 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5245 { 0x14a8, 0, 0x00000000, 0x000001ff },
5246 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5247 { 0x14b0, 0, 0x00000002, 0x00000001 },
5248 { 0x14b8, 0, 0x00000000, 0x00000000 },
5249 { 0x14c0, 0, 0x00000000, 0x00000009 },
5250 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5251 { 0x14cc, 0, 0x00000000, 0x00000001 },
5252 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5254 { 0x1800, 0, 0x00000000, 0x00000001 },
5255 { 0x1804, 0, 0x00000000, 0x00000003 },
5257 { 0x2800, 0, 0x00000000, 0x00000001 },
5258 { 0x2804, 0, 0x00000000, 0x00003f01 },
5259 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5260 { 0x2810, 0, 0xffff0000, 0x00000000 },
5261 { 0x2814, 0, 0xffff0000, 0x00000000 },
5262 { 0x2818, 0, 0xffff0000, 0x00000000 },
5263 { 0x281c, 0, 0xffff0000, 0x00000000 },
5264 { 0x2834, 0, 0xffffffff, 0x00000000 },
5265 { 0x2840, 0, 0x00000000, 0xffffffff },
5266 { 0x2844, 0, 0x00000000, 0xffffffff },
5267 { 0x2848, 0, 0xffffffff, 0x00000000 },
5268 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5270 { 0x2c00, 0, 0x00000000, 0x00000011 },
5271 { 0x2c04, 0, 0x00000000, 0x00030007 },
5273 { 0x3c00, 0, 0x00000000, 0x00000001 },
5274 { 0x3c04, 0, 0x00000000, 0x00070000 },
5275 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5276 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5277 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5278 { 0x3c14, 0, 0x00000000, 0xffffffff },
5279 { 0x3c18, 0, 0x00000000, 0xffffffff },
5280 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5281 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5283 { 0x5004, 0, 0x00000000, 0x0000007f },
5284 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5286 { 0x5c00, 0, 0x00000000, 0x00000001 },
5287 { 0x5c04, 0, 0x00000000, 0x0003000f },
5288 { 0x5c08, 0, 0x00000003, 0x00000000 },
5289 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5290 { 0x5c10, 0, 0x00000000, 0xffffffff },
5291 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5292 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5293 { 0x5c88, 0, 0x00000000, 0x00077373 },
5294 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5296 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5297 { 0x680c, 0, 0xffffffff, 0x00000000 },
5298 { 0x6810, 0, 0xffffffff, 0x00000000 },
5299 { 0x6814, 0, 0xffffffff, 0x00000000 },
5300 { 0x6818, 0, 0xffffffff, 0x00000000 },
5301 { 0x681c, 0, 0xffffffff, 0x00000000 },
5302 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5303 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5304 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5305 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5306 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5307 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5308 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5309 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5310 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5311 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5312 { 0x684c, 0, 0xffffffff, 0x00000000 },
5313 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5314 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5315 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5316 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5317 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5318 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5320 { 0xffff, 0, 0x00000000, 0x00000000 },
5325 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5328 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5329 u32 offset, rw_mask, ro_mask, save_val, val;
5330 u16 flags = reg_tbl[i].flags;
5332 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5335 offset = (u32) reg_tbl[i].offset;
5336 rw_mask = reg_tbl[i].rw_mask;
5337 ro_mask = reg_tbl[i].ro_mask;
5339 save_val = readl(bp->regview + offset);
5341 writel(0, bp->regview + offset);
5343 val = readl(bp->regview + offset);
5344 if ((val & rw_mask) != 0) {
5348 if ((val & ro_mask) != (save_val & ro_mask)) {
5352 writel(0xffffffff, bp->regview + offset);
5354 val = readl(bp->regview + offset);
5355 if ((val & rw_mask) != rw_mask) {
5359 if ((val & ro_mask) != (save_val & ro_mask)) {
5363 writel(save_val, bp->regview + offset);
5367 writel(save_val, bp->regview + offset);
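/* Write each test pattern across the given internal memory range via
 * indirect access and read it back to verify.
 */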
5375 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5377 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5378 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5381 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5384 for (offset = 0; offset < size; offset += 4) {
5386 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5388 if (bnx2_reg_rd_ind(bp, start + offset) !=
5398 bnx2_test_memory(struct bnx2 *bp)
5402 static struct mem_entry {
5405 } mem_tbl_5706[] = {
5406 { 0x60000, 0x4000 },
5407 { 0xa0000, 0x3000 },
5408 { 0xe0000, 0x4000 },
5409 { 0x120000, 0x4000 },
5410 { 0x1a0000, 0x4000 },
5411 { 0x160000, 0x4000 },
5415 { 0x60000, 0x4000 },
5416 { 0xa0000, 0x3000 },
5417 { 0xe0000, 0x4000 },
5418 { 0x120000, 0x4000 },
5419 { 0x1a0000, 0x4000 },
5422 struct mem_entry *mem_tbl;
5424 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5425 mem_tbl = mem_tbl_5709;
5427 mem_tbl = mem_tbl_5706;
5429 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5430 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5431 mem_tbl[i].len)) != 0) {
5439 #define BNX2_MAC_LOOPBACK 0
5440 #define BNX2_PHY_LOOPBACK 1
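/* Send one self-addressed frame in MAC or PHY loopback and check that
 * it returns intact: the consumer indices must advance by one frame,
 * the frame header must be error-free, and the payload must match
 * byte for byte.
 */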
5443 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5445 unsigned int pkt_size, num_pkts, i;
5446 struct sk_buff *skb, *rx_skb;
5447 unsigned char *packet;
5448 u16 rx_start_idx, rx_idx;
5451 struct sw_bd *rx_buf;
5452 struct l2_fhdr *rx_hdr;
5454 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5455 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5456 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5460 txr = &tx_napi->tx_ring;
5461 rxr = &bnapi->rx_ring;
5462 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5463 bp->loopback = MAC_LOOPBACK;
5464 bnx2_set_mac_loopback(bp);
5466 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5467 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5470 bp->loopback = PHY_LOOPBACK;
5471 bnx2_set_phy_loopback(bp);
5476 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5477 skb = netdev_alloc_skb(bp->dev, pkt_size);
5480 packet = skb_put(skb, pkt_size);
5481 memcpy(packet, bp->dev->dev_addr, 6);
5482 memset(packet + 6, 0x0, 8);
5483 for (i = 14; i < pkt_size; i++)
5484 packet[i] = (unsigned char) (i & 0xff);
5486 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5490 map = skb_shinfo(skb)->dma_head;
5492 REG_WR(bp, BNX2_HC_COMMAND,
5493 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5495 REG_RD(bp, BNX2_HC_COMMAND);
5498 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5502 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5504 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5505 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5506 txbd->tx_bd_mss_nbytes = pkt_size;
5507 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5510 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5511 txr->tx_prod_bseq += pkt_size;
5513 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5514 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5518 REG_WR(bp, BNX2_HC_COMMAND,
5519 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5521 REG_RD(bp, BNX2_HC_COMMAND);
5525 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5528 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5529 goto loopback_test_done;
5531 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5532 if (rx_idx != rx_start_idx + num_pkts) {
5533 goto loopback_test_done;
5536 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5537 rx_skb = rx_buf->skb;
5539 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5540 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5542 pci_dma_sync_single_for_cpu(bp->pdev,
5543 pci_unmap_addr(rx_buf, mapping),
5544 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5546 if (rx_hdr->l2_fhdr_status &
5547 (L2_FHDR_ERRORS_BAD_CRC |
5548 L2_FHDR_ERRORS_PHY_DECODE |
5549 L2_FHDR_ERRORS_ALIGNMENT |
5550 L2_FHDR_ERRORS_TOO_SHORT |
5551 L2_FHDR_ERRORS_GIANT_FRAME)) {
5553 goto loopback_test_done;
5556 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5557 goto loopback_test_done;
5560 for (i = 14; i < pkt_size; i++) {
5561 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5562 goto loopback_test_done;
5573 #define BNX2_MAC_LOOPBACK_FAILED 1
5574 #define BNX2_PHY_LOOPBACK_FAILED 2
5575 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5576 BNX2_PHY_LOOPBACK_FAILED)
5579 bnx2_test_loopback(struct bnx2 *bp)
5583 if (!netif_running(bp->dev))
5584 return BNX2_LOOPBACK_FAILED;
5586 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5587 spin_lock_bh(&bp->phy_lock);
5588 bnx2_init_phy(bp, 1);
5589 spin_unlock_bh(&bp->phy_lock);
5590 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5591 rc |= BNX2_MAC_LOOPBACK_FAILED;
5592 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5593 rc |= BNX2_PHY_LOOPBACK_FAILED;
5597 #define NVRAM_SIZE 0x200
5598 #define CRC32_RESIDUAL 0xdebb20e3
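/* NVRAM self-test: verify the 0x669955aa magic word, then CRC two
 * 256-byte blocks.  Each block includes its stored CRC, so a clean
 * CRC32 pass ends at the constant residual 0xdebb20e3.
 */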
5601 bnx2_test_nvram(struct bnx2 *bp)
5603 __be32 buf[NVRAM_SIZE / 4];
5604 u8 *data = (u8 *) buf;
5608 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5609 goto test_nvram_done;
5611 magic = be32_to_cpu(buf[0]);
5612 if (magic != 0x669955aa) {
5614 goto test_nvram_done;
5617 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5618 goto test_nvram_done;
5620 csum = ether_crc_le(0x100, data);
5621 if (csum != CRC32_RESIDUAL) {
5623 goto test_nvram_done;
5626 csum = ether_crc_le(0x100, data + 0x100);
5627 if (csum != CRC32_RESIDUAL) {
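/* Report link state.  The BMSR is read twice because link-down is
 * latched and the first read can return stale status.
 */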
5636 bnx2_test_link(struct bnx2 *bp)
5640 if (!netif_running(bp->dev))
5643 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5648 spin_lock_bh(&bp->phy_lock);
5649 bnx2_enable_bmsr1(bp);
5650 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5651 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5652 bnx2_disable_bmsr1(bp);
5653 spin_unlock_bh(&bp->phy_lock);
5655 if (bmsr & BMSR_LSTATUS) {
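/* Interrupt self-test: force an immediate coalescing event and poll
 * for up to ~100 ms for the status block index to advance.
 */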
5662 bnx2_test_intr(struct bnx2 *bp)
5667 if (!netif_running(bp->dev))
5670 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5672 /* This register is not touched during run-time. */
5673 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5674 REG_RD(bp, BNX2_HC_COMMAND);
5676 for (i = 0; i < 10; i++) {
5677 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5683 msleep_interruptible(10);
5691 /* Determine the link state for parallel detection. */
5693 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5695 u32 mode_ctl, an_dbg, exp;
5697 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5700 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5701 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5703 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5706 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5707 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5708 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5710 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5713 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5714 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5715 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5717 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
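/* 5706 SerDes watchdog.  If autoneg has not produced a link, try
 * parallel detection by forcing 1 Gb/s full duplex; once linked that
 * way, watch for the partner starting autoneg and re-enable it.
 */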
5724 bnx2_5706_serdes_timer(struct bnx2 *bp)
5728 spin_lock(&bp->phy_lock);
5729 if (bp->serdes_an_pending) {
5730 bp->serdes_an_pending--;
5732 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5735 bp->current_interval = BNX2_TIMER_INTERVAL;
5737 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5739 if (bmcr & BMCR_ANENABLE) {
5740 if (bnx2_5706_serdes_has_link(bp)) {
5741 bmcr &= ~BMCR_ANENABLE;
5742 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5743 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5744 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5748 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5749 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5752 bnx2_write_phy(bp, 0x17, 0x0f01);
5753 bnx2_read_phy(bp, 0x15, &phy2);
5757 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5758 bmcr |= BMCR_ANENABLE;
5759 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5761 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5764 bp->current_interval = BNX2_TIMER_INTERVAL;
5769 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5770 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5771 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5773 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5774 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5775 bnx2_5706s_force_link_dn(bp, 1);
5776 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5779 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5782 spin_unlock(&bp->phy_lock);
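/* 5708 SerDes watchdog: while the link is down, alternate between
 * forced 2.5 Gb/s and autoneg so either type of link partner can be
 * detected.
 */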
5786 bnx2_5708_serdes_timer(struct bnx2 *bp)
5788 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5791 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5792 bp->serdes_an_pending = 0;
5796 spin_lock(&bp->phy_lock);
5797 if (bp->serdes_an_pending)
5798 bp->serdes_an_pending--;
5799 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5802 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5803 if (bmcr & BMCR_ANENABLE) {
5804 bnx2_enable_forced_2g5(bp);
5805 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
5807 bnx2_disable_forced_2g5(bp);
5808 bp->serdes_an_pending = 2;
5809 bp->current_interval = BNX2_TIMER_INTERVAL;
5813 bp->current_interval = BNX2_TIMER_INTERVAL;
5815 spin_unlock(&bp->phy_lock);
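/* Periodic driver timer: checks for missed MSIs, sends the firmware
 * heartbeat, refreshes the firmware RX drop count, applies the 5708
 * statistics workaround, runs the SerDes watchdogs, and re-arms.
 */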
5819 bnx2_timer(unsigned long data)
5821 struct bnx2 *bp = (struct bnx2 *) data;
5823 if (!netif_running(bp->dev))
5826 if (atomic_read(&bp->intr_sem) != 0)
5827 goto bnx2_restart_timer;
5829 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5830 BNX2_FLAG_USING_MSI)
5831 bnx2_chk_missed_msi(bp);
5833 bnx2_send_heart_beat(bp);
5835 bp->stats_blk->stat_FwRxDrop =
5836 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5838 /* work around occasionally corrupted statistics counters */
5839 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5840 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5841 BNX2_HC_COMMAND_STATS_NOW);
5843 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5844 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5845 bnx2_5706_serdes_timer(bp);
5847 bnx2_5708_serdes_timer(bp);
5851 mod_timer(&bp->timer, jiffies + bp->current_interval);
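/* Request one IRQ per vector in use.  Only legacy INTx needs
 * IRQF_SHARED; MSI/MSI-X vectors are exclusive to this device.
 */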
5855 bnx2_request_irq(struct bnx2 *bp)
5857 unsigned long flags;
5858 struct bnx2_irq *irq;
5861 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5864 flags = IRQF_SHARED;
5866 for (i = 0; i < bp->irq_nvecs; i++) {
5867 irq = &bp->irq_tbl[i];
5868 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5878 bnx2_free_irq(struct bnx2 *bp)
5880 struct bnx2_irq *irq;
5883 for (i = 0; i < bp->irq_nvecs; i++) {
5884 irq = &bp->irq_tbl[i];
5886 free_irq(irq->vector, &bp->bnx2_napi[i]);
5889 if (bp->flags & BNX2_FLAG_USING_MSI)
5890 pci_disable_msi(bp->pdev);
5891 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5892 pci_disable_msix(bp->pdev);
5894 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
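/* Switch to MSI-X: point the chip's MSI-X table and PBA at GRC
 * windows 2 and 3, request the full vector set from the PCI core, and
 * record the vectors assigned.
 */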
5898 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5901 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5902 struct net_device *dev = bp->dev;
5903 const int len = sizeof(bp->irq_tbl[0].name);
5905 bnx2_setup_msix_tbl(bp);
5906 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5907 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5908 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5910 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5911 msix_ent[i].entry = i;
5912 msix_ent[i].vector = 0;
5915 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5919 bp->irq_nvecs = msix_vecs;
5920 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5921 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5922 bp->irq_tbl[i].vector = msix_ent[i].vector;
5923 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
5924 bp->irq_tbl[i].handler = bnx2_msi_1shot;
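/* Choose the interrupt mode: prefer MSI-X with up to (online CPUs + 1)
 * vectors capped at the ring maximum, fall back to MSI and then to
 * INTx, and size the TX/RX ring counts from the vectors obtained.
 */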
5929 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5931 int cpus = num_online_cpus();
5932 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5934 bp->irq_tbl[0].handler = bnx2_interrupt;
5935 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5937 bp->irq_tbl[0].vector = bp->pdev->irq;
5939 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5940 bnx2_enable_msix(bp, msix_vecs);
5942 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5943 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5944 if (pci_enable_msi(bp->pdev) == 0) {
5945 bp->flags |= BNX2_FLAG_USING_MSI;
5946 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5947 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5948 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5950 bp->irq_tbl[0].handler = bnx2_msi;
5952 bp->irq_tbl[0].vector = bp->pdev->irq;
5956 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5957 bp->dev->real_num_tx_queues = bp->num_tx_rings;
5959 bp->num_rx_rings = bp->irq_nvecs;
5962 /* Called with rtnl_lock */
5964 bnx2_open(struct net_device *dev)
5966 struct bnx2 *bp = netdev_priv(dev);
5969 netif_carrier_off(dev);
5971 bnx2_set_power_state(bp, PCI_D0);
5972 bnx2_disable_int(bp);
5974 bnx2_setup_int_mode(bp, disable_msi);
5975 bnx2_napi_enable(bp);
5976 rc = bnx2_alloc_mem(bp);
5980 rc = bnx2_request_irq(bp);
5984 rc = bnx2_init_nic(bp, 1);
5988 mod_timer(&bp->timer, jiffies + bp->current_interval);
5990 atomic_set(&bp->intr_sem, 0);
5992 bnx2_enable_int(bp);
5994 if (bp->flags & BNX2_FLAG_USING_MSI) {
5995 /* Test MSI to make sure it is working
5996 * If MSI test fails, go back to INTx mode.
5997 */
5998 if (bnx2_test_intr(bp) != 0) {
5999 printk(KERN_WARNING PFX "%s: No interrupt was generated"
6000 " using MSI, switching to INTx mode. Please"
6001 " report this failure to the PCI maintainer"
6002 " and include system chipset information.\n",
6005 bnx2_disable_int(bp);
6008 bnx2_setup_int_mode(bp, 1);
6010 rc = bnx2_init_nic(bp, 0);
6013 rc = bnx2_request_irq(bp);
6016 del_timer_sync(&bp->timer);
6019 bnx2_enable_int(bp);
6022 if (bp->flags & BNX2_FLAG_USING_MSI)
6023 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
6024 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6025 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
6027 netif_tx_start_all_queues(dev);
6032 bnx2_napi_disable(bp);
6040 bnx2_reset_task(struct work_struct *work)
6042 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6044 if (!netif_running(bp->dev))
6047 bnx2_netif_stop(bp);
6049 bnx2_init_nic(bp, 1);
6051 atomic_set(&bp->intr_sem, 1);
6052 bnx2_netif_start(bp);
6056 bnx2_tx_timeout(struct net_device *dev)
6058 struct bnx2 *bp = netdev_priv(dev);
6060 /* This allows the netif to be shut down gracefully before resetting */
6061 schedule_work(&bp->reset_task);
6065 /* Called with rtnl_lock */
6067 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6069 struct bnx2 *bp = netdev_priv(dev);
6071 bnx2_netif_stop(bp);
6074 bnx2_set_rx_mode(dev);
6075 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6076 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6078 bnx2_netif_start(bp);
6082 /* Called with netif_tx_lock.
6083 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6084 * netif_wake_queue().
6085 */
6087 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6089 struct bnx2 *bp = netdev_priv(dev);
6092 struct sw_tx_bd *tx_buf;
6093 u32 len, vlan_tag_flags, last_frag, mss;
6094 u16 prod, ring_prod;
6096 struct bnx2_napi *bnapi;
6097 struct bnx2_tx_ring_info *txr;
6098 struct netdev_queue *txq;
6099 struct skb_shared_info *sp;
6101 /* Determine which tx ring we will be placed on */
6102 i = skb_get_queue_mapping(skb);
6103 bnapi = &bp->bnx2_napi[i];
6104 txr = &bnapi->tx_ring;
6105 txq = netdev_get_tx_queue(dev, i);
6107 if (unlikely(bnx2_tx_avail(bp, txr) <
6108 (skb_shinfo(skb)->nr_frags + 1))) {
6109 netif_tx_stop_queue(txq);
6110 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6113 return NETDEV_TX_BUSY;
6115 len = skb_headlen(skb);
6116 prod = txr->tx_prod;
6117 ring_prod = TX_RING_IDX(prod);
6120 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6121 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6125 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6127 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6130 if ((mss = skb_shinfo(skb)->gso_size)) {
6134 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6136 tcp_opt_len = tcp_optlen(skb);
6138 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6139 u32 tcp_off = skb_transport_offset(skb) -
6140 sizeof(struct ipv6hdr) - ETH_HLEN;
6142 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6143 TX_BD_FLAGS_SW_FLAGS;
6144 if (likely(tcp_off == 0))
6145 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6148 vlan_tag_flags |= ((tcp_off & 0x3) <<
6149 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6150 ((tcp_off & 0x10) <<
6151 TX_BD_FLAGS_TCP6_OFF4_SHL);
6152 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6156 if (tcp_opt_len || (iph->ihl > 5)) {
6157 vlan_tag_flags |= ((iph->ihl - 5) +
6158 (tcp_opt_len >> 2)) << 8;
6164 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
6166 return NETDEV_TX_OK;
6169 sp = skb_shinfo(skb);
6170 mapping = sp->dma_head;
6172 tx_buf = &txr->tx_buf_ring[ring_prod];
6175 txbd = &txr->tx_desc_ring[ring_prod];
6177 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6178 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6179 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6180 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6182 last_frag = skb_shinfo(skb)->nr_frags;
6183 tx_buf->nr_frags = last_frag;
6184 tx_buf->is_gso = skb_is_gso(skb);
6186 for (i = 0; i < last_frag; i++) {
6187 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6189 prod = NEXT_TX_BD(prod);
6190 ring_prod = TX_RING_IDX(prod);
6191 txbd = &txr->tx_desc_ring[ring_prod];
6194 mapping = sp->dma_maps[i];
6196 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6197 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6198 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6199 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6202 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6204 prod = NEXT_TX_BD(prod);
6205 txr->tx_prod_bseq += skb->len;
6207 REG_WR16(bp, txr->tx_bidx_addr, prod);
6208 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6212 txr->tx_prod = prod;
6214 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6215 netif_tx_stop_queue(txq);
6216 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6217 netif_tx_wake_queue(txq);
6220 return NETDEV_TX_OK;
6223 /* Called with rtnl_lock */
6225 bnx2_close(struct net_device *dev)
6227 struct bnx2 *bp = netdev_priv(dev);
6229 cancel_work_sync(&bp->reset_task);
6231 bnx2_disable_int_sync(bp);
6232 bnx2_napi_disable(bp);
6233 del_timer_sync(&bp->timer);
6234 bnx2_shutdown_chip(bp);
6239 netif_carrier_off(bp->dev);
6240 bnx2_set_power_state(bp, PCI_D3hot);
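/* Hardware counters are split 64-bit values (_hi/_lo pairs).  64-bit
 * hosts combine both halves; 32-bit hosts can only return the low
 * word in the unsigned long netdev stats fields.
 */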
6244 #define GET_NET_STATS64(ctr) \
6245 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6246 (unsigned long) (ctr##_lo)
6248 #define GET_NET_STATS32(ctr) \
6249 (ctr##_lo)
6251 #if (BITS_PER_LONG == 64)
6252 #define GET_NET_STATS GET_NET_STATS64
6253 #else
6254 #define GET_NET_STATS GET_NET_STATS32
6255 #endif
6257 static struct net_device_stats *
6258 bnx2_get_stats(struct net_device *dev)
6260 struct bnx2 *bp = netdev_priv(dev);
6261 struct statistics_block *stats_blk = bp->stats_blk;
6262 struct net_device_stats *net_stats = &dev->stats;
6264 if (bp->stats_blk == NULL) {
6267 net_stats->rx_packets =
6268 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6269 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6270 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6272 net_stats->tx_packets =
6273 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6274 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6275 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6277 net_stats->rx_bytes =
6278 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6280 net_stats->tx_bytes =
6281 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6283 net_stats->multicast =
6284 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6286 net_stats->collisions =
6287 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6289 net_stats->rx_length_errors =
6290 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6291 stats_blk->stat_EtherStatsOverrsizePkts);
6293 net_stats->rx_over_errors =
6294 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6296 net_stats->rx_frame_errors =
6297 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6299 net_stats->rx_crc_errors =
6300 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6302 net_stats->rx_errors = net_stats->rx_length_errors +
6303 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6304 net_stats->rx_crc_errors;
6306 net_stats->tx_aborted_errors =
6307 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6308 stats_blk->stat_Dot3StatsLateCollisions);
6310 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6311 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6312 net_stats->tx_carrier_errors = 0;
6314 net_stats->tx_carrier_errors =
6316 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6319 net_stats->tx_errors =
6321 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6323 net_stats->tx_aborted_errors +
6324 net_stats->tx_carrier_errors;
6326 net_stats->rx_missed_errors =
6327 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6328 stats_blk->stat_FwRxDrop);
6333 /* All ethtool functions called with rtnl_lock */
6336 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6338 struct bnx2 *bp = netdev_priv(dev);
6339 int support_serdes = 0, support_copper = 0;
6341 cmd->supported = SUPPORTED_Autoneg;
6342 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6345 } else if (bp->phy_port == PORT_FIBRE)
6350 if (support_serdes) {
6351 cmd->supported |= SUPPORTED_1000baseT_Full |
6353 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6354 cmd->supported |= SUPPORTED_2500baseX_Full;
6357 if (support_copper) {
6358 cmd->supported |= SUPPORTED_10baseT_Half |
6359 SUPPORTED_10baseT_Full |
6360 SUPPORTED_100baseT_Half |
6361 SUPPORTED_100baseT_Full |
6362 SUPPORTED_1000baseT_Full |
6367 spin_lock_bh(&bp->phy_lock);
6368 cmd->port = bp->phy_port;
6369 cmd->advertising = bp->advertising;
6371 if (bp->autoneg & AUTONEG_SPEED) {
6372 cmd->autoneg = AUTONEG_ENABLE;
6375 cmd->autoneg = AUTONEG_DISABLE;
6378 if (netif_carrier_ok(dev)) {
6379 cmd->speed = bp->line_speed;
6380 cmd->duplex = bp->duplex;
6386 spin_unlock_bh(&bp->phy_lock);
6388 cmd->transceiver = XCVR_INTERNAL;
6389 cmd->phy_address = bp->phy_addr;
6395 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6397 struct bnx2 *bp = netdev_priv(dev);
6398 u8 autoneg = bp->autoneg;
6399 u8 req_duplex = bp->req_duplex;
6400 u16 req_line_speed = bp->req_line_speed;
6401 u32 advertising = bp->advertising;
6404 spin_lock_bh(&bp->phy_lock);
6406 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6407 goto err_out_unlock;
6409 if (cmd->port != bp->phy_port &&
6410 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6411 goto err_out_unlock;
6413 /* If device is down, we can store the settings only if the user
6414 * is setting the currently active port.
6415 */
6416 if (!netif_running(dev) && cmd->port != bp->phy_port)
6417 goto err_out_unlock;
6419 if (cmd->autoneg == AUTONEG_ENABLE) {
6420 autoneg |= AUTONEG_SPEED;
6422 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6424 /* allow advertising 1 speed */
6425 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6426 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6427 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6428 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6430 if (cmd->port == PORT_FIBRE)
6431 goto err_out_unlock;
6433 advertising = cmd->advertising;
6435 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6436 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6437 (cmd->port == PORT_TP))
6438 goto err_out_unlock;
6439 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6440 advertising = cmd->advertising;
6441 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6442 goto err_out_unlock;
6444 if (cmd->port == PORT_FIBRE)
6445 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6447 advertising = ETHTOOL_ALL_COPPER_SPEED;
6449 advertising |= ADVERTISED_Autoneg;
6452 if (cmd->port == PORT_FIBRE) {
6453 if ((cmd->speed != SPEED_1000 &&
6454 cmd->speed != SPEED_2500) ||
6455 (cmd->duplex != DUPLEX_FULL))
6456 goto err_out_unlock;
6458 if (cmd->speed == SPEED_2500 &&
6459 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6460 goto err_out_unlock;
6462 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6463 goto err_out_unlock;
6465 autoneg &= ~AUTONEG_SPEED;
6466 req_line_speed = cmd->speed;
6467 req_duplex = cmd->duplex;
6471 bp->autoneg = autoneg;
6472 bp->advertising = advertising;
6473 bp->req_line_speed = req_line_speed;
6474 bp->req_duplex = req_duplex;
6477 /* If device is down, the new settings will be picked up when it is
6478 * brought up.
6479 */
6480 if (netif_running(dev))
6481 err = bnx2_setup_phy(bp, cmd->port);
6484 spin_unlock_bh(&bp->phy_lock);
6490 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6492 struct bnx2 *bp = netdev_priv(dev);
6494 strcpy(info->driver, DRV_MODULE_NAME);
6495 strcpy(info->version, DRV_MODULE_VERSION);
6496 strcpy(info->bus_info, pci_name(bp->pdev));
6497 strcpy(info->fw_version, bp->fw_version);
6500 #define BNX2_REGDUMP_LEN (32 * 1024)
6503 bnx2_get_regs_len(struct net_device *dev)
6505 return BNX2_REGDUMP_LEN;
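/* Register dump for ethtool.  reg_boundaries[] holds back-to-back
 * [start, end) offsets of readable ranges; offsets in the holes
 * between ranges are left zero-filled in the dump.
 */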
6509 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6511 u32 *p = _p, i, offset;
6513 struct bnx2 *bp = netdev_priv(dev);
6514 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6515 0x0800, 0x0880, 0x0c00, 0x0c10,
6516 0x0c30, 0x0d08, 0x1000, 0x101c,
6517 0x1040, 0x1048, 0x1080, 0x10a4,
6518 0x1400, 0x1490, 0x1498, 0x14f0,
6519 0x1500, 0x155c, 0x1580, 0x15dc,
6520 0x1600, 0x1658, 0x1680, 0x16d8,
6521 0x1800, 0x1820, 0x1840, 0x1854,
6522 0x1880, 0x1894, 0x1900, 0x1984,
6523 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6524 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6525 0x2000, 0x2030, 0x23c0, 0x2400,
6526 0x2800, 0x2820, 0x2830, 0x2850,
6527 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6528 0x3c00, 0x3c94, 0x4000, 0x4010,
6529 0x4080, 0x4090, 0x43c0, 0x4458,
6530 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6531 0x4fc0, 0x5010, 0x53c0, 0x5444,
6532 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6533 0x5fc0, 0x6000, 0x6400, 0x6428,
6534 0x6800, 0x6848, 0x684c, 0x6860,
6535 0x6888, 0x6910, 0x8000 };
6539 memset(p, 0, BNX2_REGDUMP_LEN);
6541 if (!netif_running(bp->dev))
6545 offset = reg_boundaries[0];
6547 while (offset < BNX2_REGDUMP_LEN) {
6548 *p++ = REG_RD(bp, offset);
6550 if (offset == reg_boundaries[i + 1]) {
6551 offset = reg_boundaries[i + 2];
6552 p = (u32 *) (orig_p + offset);
6559 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6561 struct bnx2 *bp = netdev_priv(dev);
6563 if (bp->flags & BNX2_FLAG_NO_WOL) {
6568 wol->supported = WAKE_MAGIC;
6570 wol->wolopts = WAKE_MAGIC;
6574 memset(&wol->sopass, 0, sizeof(wol->sopass));
6578 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6580 struct bnx2 *bp = netdev_priv(dev);
6582 if (wol->wolopts & ~WAKE_MAGIC)
6585 if (wol->wolopts & WAKE_MAGIC) {
6586 if (bp->flags & BNX2_FLAG_NO_WOL)
6598 bnx2_nway_reset(struct net_device *dev)
6600 struct bnx2 *bp = netdev_priv(dev);
6603 if (!netif_running(dev))
6606 if (!(bp->autoneg & AUTONEG_SPEED)) {
6610 spin_lock_bh(&bp->phy_lock);
6612 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6615 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6616 spin_unlock_bh(&bp->phy_lock);
6620 /* Force a link down that is visible to the other side */
6621 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6622 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6623 spin_unlock_bh(&bp->phy_lock);
6627 spin_lock_bh(&bp->phy_lock);
6629 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6630 bp->serdes_an_pending = 1;
6631 mod_timer(&bp->timer, jiffies + bp->current_interval);
6634 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6635 bmcr &= ~BMCR_LOOPBACK;
6636 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6638 spin_unlock_bh(&bp->phy_lock);
6644 bnx2_get_eeprom_len(struct net_device *dev)
6646 struct bnx2 *bp = netdev_priv(dev);
6648 if (bp->flash_info == NULL)
6651 return (int) bp->flash_size;
6655 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6658 struct bnx2 *bp = netdev_priv(dev);
6661 if (!netif_running(dev))
6664 /* parameters already validated in ethtool_get_eeprom */
6666 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6672 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6675 struct bnx2 *bp = netdev_priv(dev);
6678 if (!netif_running(dev))
6681 /* parameters already validated in ethtool_set_eeprom */
6683 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6689 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6691 struct bnx2 *bp = netdev_priv(dev);
6693 memset(coal, 0, sizeof(struct ethtool_coalesce));
6695 coal->rx_coalesce_usecs = bp->rx_ticks;
6696 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6697 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6698 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6700 coal->tx_coalesce_usecs = bp->tx_ticks;
6701 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6702 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6703 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6705 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6711 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6713 struct bnx2 *bp = netdev_priv(dev);
6715 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6716 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6718 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6719 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6721 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6722 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6724 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6725 if (bp->rx_quick_cons_trip_int > 0xff)
6726 bp->rx_quick_cons_trip_int = 0xff;
6728 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6729 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6731 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6732 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6734 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6735 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6737 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6738 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6741 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6742 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6743 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6744 bp->stats_ticks = USEC_PER_SEC;
6746 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6747 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6748 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6750 if (netif_running(bp->dev)) {
6751 bnx2_netif_stop(bp);
6752 bnx2_init_nic(bp, 0);
6753 bnx2_netif_start(bp);
6760 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6762 struct bnx2 *bp = netdev_priv(dev);
6764 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6765 ering->rx_mini_max_pending = 0;
6766 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6768 ering->rx_pending = bp->rx_ring_size;
6769 ering->rx_mini_pending = 0;
6770 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6772 ering->tx_max_pending = MAX_TX_DESC_CNT;
6773 ering->tx_pending = bp->tx_ring_size;
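/* Resize the rings: quiesce and reset the chip, release the old
 * buffers and ring memory, then reallocate and restart with the new
 * sizes if the device is up.
 */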
6777 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6779 if (netif_running(bp->dev)) {
6780 bnx2_netif_stop(bp);
6781 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6786 bnx2_set_rx_ring_size(bp, rx);
6787 bp->tx_ring_size = tx;
6789 if (netif_running(bp->dev)) {
6792 rc = bnx2_alloc_mem(bp);
6795 bnx2_init_nic(bp, 0);
6796 bnx2_netif_start(bp);
6802 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6804 struct bnx2 *bp = netdev_priv(dev);
6807 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6808 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6809 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6813 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6818 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6820 struct bnx2 *bp = netdev_priv(dev);
6822 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6823 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6824 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6828 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6830 struct bnx2 *bp = netdev_priv(dev);
6832 bp->req_flow_ctrl = 0;
6833 if (epause->rx_pause)
6834 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6835 if (epause->tx_pause)
6836 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6838 if (epause->autoneg) {
6839 bp->autoneg |= AUTONEG_FLOW_CTRL;
6842 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6845 if (netif_running(dev)) {
6846 spin_lock_bh(&bp->phy_lock);
6847 bnx2_setup_phy(bp, bp->phy_port);
6848 spin_unlock_bh(&bp->phy_lock);
6855 bnx2_get_rx_csum(struct net_device *dev)
6857 struct bnx2 *bp = netdev_priv(dev);
6863 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6865 struct bnx2 *bp = netdev_priv(dev);
6872 bnx2_set_tso(struct net_device *dev, u32 data)
6874 struct bnx2 *bp = netdev_priv(dev);
6877 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6878 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6879 dev->features |= NETIF_F_TSO6;
6881 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6886 #define BNX2_NUM_STATS 46
6889 char string[ETH_GSTRING_LEN];
6890 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6892 { "rx_error_bytes" },
6894 { "tx_error_bytes" },
6895 { "rx_ucast_packets" },
6896 { "rx_mcast_packets" },
6897 { "rx_bcast_packets" },
6898 { "tx_ucast_packets" },
6899 { "tx_mcast_packets" },
6900 { "tx_bcast_packets" },
6901 { "tx_mac_errors" },
6902 { "tx_carrier_errors" },
6903 { "rx_crc_errors" },
6904 { "rx_align_errors" },
6905 { "tx_single_collisions" },
6906 { "tx_multi_collisions" },
6908 { "tx_excess_collisions" },
6909 { "tx_late_collisions" },
6910 { "tx_total_collisions" },
6913 { "rx_undersize_packets" },
6914 { "rx_oversize_packets" },
6915 { "rx_64_byte_packets" },
6916 { "rx_65_to_127_byte_packets" },
6917 { "rx_128_to_255_byte_packets" },
6918 { "rx_256_to_511_byte_packets" },
6919 { "rx_512_to_1023_byte_packets" },
6920 { "rx_1024_to_1522_byte_packets" },
6921 { "rx_1523_to_9022_byte_packets" },
6922 { "tx_64_byte_packets" },
6923 { "tx_65_to_127_byte_packets" },
6924 { "tx_128_to_255_byte_packets" },
6925 { "tx_256_to_511_byte_packets" },
6926 { "tx_512_to_1023_byte_packets" },
6927 { "tx_1024_to_1522_byte_packets" },
6928 { "tx_1523_to_9022_byte_packets" },
6929 { "rx_xon_frames" },
6930 { "rx_xoff_frames" },
6931 { "tx_xon_frames" },
6932 { "tx_xoff_frames" },
6933 { "rx_mac_ctrl_frames" },
6934 { "rx_filtered_packets" },
6936 { "rx_fw_discards" },
6939 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6941 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6942 STATS_OFFSET32(stat_IfHCInOctets_hi),
6943 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6944 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6945 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6946 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6947 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6948 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6949 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6950 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6951 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6952 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6953 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6954 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6955 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6956 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6957 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6958 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6959 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6960 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6961 STATS_OFFSET32(stat_EtherStatsCollisions),
6962 STATS_OFFSET32(stat_EtherStatsFragments),
6963 STATS_OFFSET32(stat_EtherStatsJabbers),
6964 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6965 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6966 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6967 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6968 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6969 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6970 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6971 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6972 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6973 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6974 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6975 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6976 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6977 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6978 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6979 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6980 STATS_OFFSET32(stat_XonPauseFramesReceived),
6981 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6982 STATS_OFFSET32(stat_OutXonSent),
6983 STATS_OFFSET32(stat_OutXoffSent),
6984 STATS_OFFSET32(stat_MacControlFramesReceived),
6985 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6986 STATS_OFFSET32(stat_IfInMBUFDiscards),
6987 STATS_OFFSET32(stat_FwRxDrop),
6990 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6991 * skipped because of errata.
6992 */
6993 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6994 8,0,8,8,8,8,8,8,8,8,
6995 4,0,4,4,4,4,4,4,4,4,
6996 4,4,4,4,4,4,4,4,4,4,
6997 4,4,4,4,4,4,4,4,4,4,
7001 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7002 8,0,8,8,8,8,8,8,8,8,
7003 4,4,4,4,4,4,4,4,4,4,
7004 4,4,4,4,4,4,4,4,4,4,
7005 4,4,4,4,4,4,4,4,4,4,
7009 #define BNX2_NUM_TESTS 6
7012 char string[ETH_GSTRING_LEN];
7013 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7014 { "register_test (offline)" },
7015 { "memory_test (offline)" },
7016 { "loopback_test (offline)" },
7017 { "nvram_test (online)" },
7018 { "interrupt_test (online)" },
7019 { "link_test (online)" },
7023 bnx2_get_sset_count(struct net_device *dev, int sset)
7027 return BNX2_NUM_TESTS;
7029 return BNX2_NUM_STATS;
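/* ethtool self-test.  Offline tests (registers, memory, loopback)
 * stop traffic and reset the chip into diagnostic mode first; online
 * tests (NVRAM, interrupt, link) run against the live device.
 */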
7036 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7038 struct bnx2 *bp = netdev_priv(dev);
7040 bnx2_set_power_state(bp, PCI_D0);
7042 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7043 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7046 bnx2_netif_stop(bp);
7047 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7050 if (bnx2_test_registers(bp) != 0) {
7052 etest->flags |= ETH_TEST_FL_FAILED;
7054 if (bnx2_test_memory(bp) != 0) {
7056 etest->flags |= ETH_TEST_FL_FAILED;
7058 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7059 etest->flags |= ETH_TEST_FL_FAILED;
7061 if (!netif_running(bp->dev))
7062 bnx2_shutdown_chip(bp);
7064 bnx2_init_nic(bp, 1);
7065 bnx2_netif_start(bp);
7068 /* wait for link up */
7069 for (i = 0; i < 7; i++) {
7072 msleep_interruptible(1000);
7076 if (bnx2_test_nvram(bp) != 0) {
7078 etest->flags |= ETH_TEST_FL_FAILED;
7080 if (bnx2_test_intr(bp) != 0) {
7082 etest->flags |= ETH_TEST_FL_FAILED;
7085 if (bnx2_test_link(bp) != 0) {
7087 etest->flags |= ETH_TEST_FL_FAILED;
7090 if (!netif_running(bp->dev))
7091 bnx2_set_power_state(bp, PCI_D3hot);
7095 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7097 switch (stringset) {
7099 memcpy(buf, bnx2_stats_str_arr,
7100 sizeof(bnx2_stats_str_arr));
7103 memcpy(buf, bnx2_tests_str_arr,
7104 sizeof(bnx2_tests_str_arr));
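/* Copy the hardware statistics block out for ethtool.  The per-chip
 * length arrays mark each counter as 8-byte, 4-byte, or skipped (0)
 * because of chip errata.
 */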
7110 bnx2_get_ethtool_stats(struct net_device *dev,
7111 struct ethtool_stats *stats, u64 *buf)
7113 struct bnx2 *bp = netdev_priv(dev);
7115 u32 *hw_stats = (u32 *) bp->stats_blk;
7116 u8 *stats_len_arr = NULL;
7118 if (hw_stats == NULL) {
7119 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7123 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7124 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7125 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7126 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7127 stats_len_arr = bnx2_5706_stats_len_arr;
7129 stats_len_arr = bnx2_5708_stats_len_arr;
7131 for (i = 0; i < BNX2_NUM_STATS; i++) {
7132 if (stats_len_arr[i] == 0) {
7133 /* skip this counter */
7137 if (stats_len_arr[i] == 4) {
7138 /* 4-byte counter */
7140 *(hw_stats + bnx2_stats_offset_arr[i]);
7143 /* 8-byte counter */
7144 buf[i] = (((u64) *(hw_stats +
7145 bnx2_stats_offset_arr[i])) << 32) +
7146 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
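/* ethtool LED identify: override the LED controls to blink the port
 * LED at 1 Hz for the requested number of seconds, then restore the
 * original MISC_CFG LED mode.
 */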
7151 bnx2_phys_id(struct net_device *dev, u32 data)
7153 struct bnx2 *bp = netdev_priv(dev);
7157 bnx2_set_power_state(bp, PCI_D0);
7162 save = REG_RD(bp, BNX2_MISC_CFG);
7163 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7165 for (i = 0; i < (data * 2); i++) {
7167 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7170 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7171 BNX2_EMAC_LED_1000MB_OVERRIDE |
7172 BNX2_EMAC_LED_100MB_OVERRIDE |
7173 BNX2_EMAC_LED_10MB_OVERRIDE |
7174 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7175 BNX2_EMAC_LED_TRAFFIC);
7177 msleep_interruptible(500);
7178 if (signal_pending(current))
7181 REG_WR(bp, BNX2_EMAC_LED, 0);
7182 REG_WR(bp, BNX2_MISC_CFG, save);
7184 if (!netif_running(dev))
7185 bnx2_set_power_state(bp, PCI_D3hot);
7191 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7193 struct bnx2 *bp = netdev_priv(dev);
7195 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7196 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7198 return (ethtool_op_set_tx_csum(dev, data));
7201 static const struct ethtool_ops bnx2_ethtool_ops = {
7202 .get_settings = bnx2_get_settings,
7203 .set_settings = bnx2_set_settings,
7204 .get_drvinfo = bnx2_get_drvinfo,
7205 .get_regs_len = bnx2_get_regs_len,
7206 .get_regs = bnx2_get_regs,
7207 .get_wol = bnx2_get_wol,
7208 .set_wol = bnx2_set_wol,
7209 .nway_reset = bnx2_nway_reset,
7210 .get_link = ethtool_op_get_link,
7211 .get_eeprom_len = bnx2_get_eeprom_len,
7212 .get_eeprom = bnx2_get_eeprom,
7213 .set_eeprom = bnx2_set_eeprom,
7214 .get_coalesce = bnx2_get_coalesce,
7215 .set_coalesce = bnx2_set_coalesce,
7216 .get_ringparam = bnx2_get_ringparam,
7217 .set_ringparam = bnx2_set_ringparam,
7218 .get_pauseparam = bnx2_get_pauseparam,
7219 .set_pauseparam = bnx2_set_pauseparam,
7220 .get_rx_csum = bnx2_get_rx_csum,
7221 .set_rx_csum = bnx2_set_rx_csum,
7222 .set_tx_csum = bnx2_set_tx_csum,
7223 .set_sg = ethtool_op_set_sg,
7224 .set_tso = bnx2_set_tso,
7225 .self_test = bnx2_self_test,
7226 .get_strings = bnx2_get_strings,
7227 .phys_id = bnx2_phys_id,
7228 .get_ethtool_stats = bnx2_get_ethtool_stats,
7229 .get_sset_count = bnx2_get_sset_count,
7232 /* Called with rtnl_lock */
7234 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7236 struct mii_ioctl_data *data = if_mii(ifr);
7237 struct bnx2 *bp = netdev_priv(dev);
7242 data->phy_id = bp->phy_addr;
7248 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7251 if (!netif_running(dev))
7254 spin_lock_bh(&bp->phy_lock);
7255 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7256 spin_unlock_bh(&bp->phy_lock);
7258 data->val_out = mii_regval;
7264 if (!capable(CAP_NET_ADMIN))
7267 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7270 if (!netif_running(dev))
7273 spin_lock_bh(&bp->phy_lock);
7274 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7275 spin_unlock_bh(&bp->phy_lock);
7286 /* Called with rtnl_lock */
7288 bnx2_change_mac_addr(struct net_device *dev, void *p)
7290 struct sockaddr *addr = p;
7291 struct bnx2 *bp = netdev_priv(dev);
7293 if (!is_valid_ether_addr(addr->sa_data))
7296 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7297 if (netif_running(dev))
7298 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7303 /* Called with rtnl_lock */
7305 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7307 struct bnx2 *bp = netdev_priv(dev);
7309 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7310 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7314 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7317 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7319 poll_bnx2(struct net_device *dev)
7321 struct bnx2 *bp = netdev_priv(dev);
7324 for (i = 0; i < bp->irq_nvecs; i++) {
7325 disable_irq(bp->irq_tbl[i].vector);
7326 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7327 enable_irq(bp->irq_tbl[i].vector);
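/* Decide whether a 5709 port is copper or SerDes from the bond id and
 * strap fields of the dual-media control register.
 */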
7332 static void __devinit
7333 bnx2_get_5709_media(struct bnx2 *bp)
7335 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7336 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7339 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7341 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7342 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7346 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7347 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7349 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7351 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7356 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7364 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
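/* Detect PCI vs. PCI-X mode, the bus clock speed, and 32- vs. 64-bit
 * bus width from the chip's PCI config status registers.
 */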
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}

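/*
 * Editor's example: a 64-bit PCI-X slot clocked at 133 MHz sets
 * BNX2_FLAG_PCIX and bus_speed_mhz = 133, which bnx2_bus_string()
 * later renders as "PCI-X 64-bit 133MHz" in the probe banner.
 */
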
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big-endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

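	/*
	 * Editor's note: DMA_BIT_MASK(40) is ((1ULL << 40) - 1), i.e.
	 * 0xffffffffff, and the same value deliberately caps both the
	 * streaming and the coherent mask on the 5708; all other chips
	 * get full 64-bit masks, with a 32-bit mask as the fallback.
	 */
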
	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	} else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		   !(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}

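	/*
	 * Editor's worked example: BC_REV = 0x04060f00 yields the bytes
	 * 4, 6 and 15; skip0 suppresses leading zeros within each byte,
	 * so bp->fw_version becomes "4.6.15".
	 */
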
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

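	/*
	 * Editor's example (hypothetical values): MAC_UPPER = 0x00000010
	 * and MAC_LOWER = 0x18a0b1c2 unpack MSB-first into
	 * 00:10:18:a0:b1:c2; the upper word carries octets 0-1, the
	 * lower word octets 2-5.
	 */
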
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int = bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI
	 * writes with byte enables disabled on the unused 32-bit word.
	 * This is legal but causes problems on the AMD 8132, which will
	 * eventually stop responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

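/*
 * Editor's note: the caller passes a char[40] scratch buffer; typical
 * results are "PCI Express" on a 5709/5716 and, for example,
 * "PCI-X 64-bit 133MHz" on an older slot.
 */
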
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}

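/*
 * Editor's note: vector 0 is polled by bnx2_poll(), which also services
 * link-state events; the remaining MSI-X vectors use the leaner
 * bnx2_poll_msix().  Both register with the standard NAPI weight of 64.
 */
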
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}

static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

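/*
 * Editor's note on the AER sequence wired up above: the PCI core first
 * calls .error_detected (the driver quiesces and asks for a reset),
 * then .slot_reset once the link has been reset (re-enable the device,
 * restore config space, re-init the chip), and finally .resume when
 * traffic may flow again.
 */
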
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);