/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX			DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.7.6"
#define DRV_MODULE_RELDATE	"May 16, 2008"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, },
};
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bnapi->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
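/* Illustrative note, assuming TX_DESC_CNT == 256 and MAX_TX_DESC_CNT == 255:
 * tx_prod and tx_cons are free-running 16-bit indices, so on a completely
 * full ring their difference can come out as TX_DESC_CNT rather than 255;
 * folding that case back to MAX_TX_DESC_CNT keeps the subtraction above
 * from reporting a bogus available count.
 */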
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);

bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
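/* Both indirect accessors follow the same two-step pattern: write the
 * target offset to the register-window address register, then move the
 * data through the window itself.  indirect_lock keeps concurrent users
 * from interleaving the address write of one access with the data phase
 * of another.
 */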
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);

bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));

bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
	spin_unlock_bh(&bp->indirect_lock);

bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

bnx2_disable_int(struct bnx2 *bp)
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

bnx2_enable_int(struct bnx2 *bp)
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);

bnx2_disable_int_sync(struct bnx2 *bp)
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);

bnx2_napi_disable(struct bnx2 *bp)
	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);

bnx2_napi_enable(struct bnx2 *bp)
	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);

bnx2_netif_stop(struct bnx2 *bp)
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */

bnx2_netif_start(struct bnx2 *bp)
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			bnx2_napi_enable(bp);

bnx2_free_mem(struct bnx2 *bp)
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		if (bp->rx_pg_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_pg_desc_ring[i],
					    bp->rx_pg_desc_mapping[i]);
		bp->rx_pg_desc_ring[i] = NULL;
	vfree(bp->rx_pg_ring);
	bp->rx_pg_ring = NULL;

bnx2_alloc_mem(struct bnx2 *bp)
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)

	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
		if (bp->rx_pg_ring == NULL)

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->bnx2_napi[0].status_blk = bp->status_blk;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

			bnapi->status_blk_msix = (void *)
				((unsigned long) bp->status_blk +
				 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->int_num = i << 24;

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
					 &bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)

bnx2_report_fw_link(struct bnx2 *bp)
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)

	switch (bp->line_speed) {
		if (bp->duplex == DUPLEX_HALF)
			fw_link_status = BNX2_LINK_STATUS_10HALF;
			fw_link_status = BNX2_LINK_STATUS_10FULL;
		if (bp->duplex == DUPLEX_HALF)
			fw_link_status = BNX2_LINK_STATUS_100HALF;
			fw_link_status = BNX2_LINK_STATUS_100FULL;
		if (bp->duplex == DUPLEX_HALF)
			fw_link_status = BNX2_LINK_STATUS_1000HALF;
			fw_link_status = BNX2_LINK_STATUS_1000FULL;
		if (bp->duplex == DUPLEX_HALF)
			fw_link_status = BNX2_LINK_STATUS_2500HALF;
			fw_link_status = BNX2_LINK_STATUS_2500FULL;

	fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

	fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

	bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

	if (!(bmsr & BMSR_ANEGCOMPLETE) ||
	    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
		fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
		fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;

	fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);

bnx2_xceiver_str(struct bnx2 *bp)
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :

bnx2_report_link(struct bnx2 *bp)
	netif_carrier_on(bp->dev);
	printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
	       bnx2_xceiver_str(bp));

	printk("%d Mbps ", bp->line_speed);

	if (bp->duplex == DUPLEX_FULL)
		printk("full duplex");
		printk("half duplex");

	if (bp->flow_ctrl & FLOW_CTRL_RX) {
		printk(", receive ");
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			printk("& transmit ");
		printk(", transmit ");
	printk("flow control ON");

	netif_carrier_off(bp->dev);
	printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
	       bnx2_xceiver_str(bp));

	bnx2_report_fw_link(bp);

bnx2_resolve_flow_ctrl(struct bnx2 *bp)
	u32 local_adv, remote_adv;

	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;

	if (bp->duplex != DUPLEX_FULL) {

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
bnx2_5709s_linkup(struct bnx2 *bp)
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;

	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
		bp->duplex = DUPLEX_HALF;

bnx2_5708s_linkup(struct bnx2 *bp)
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;

	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
		bp->duplex = DUPLEX_HALF;

bnx2_5706s_linkup(struct bnx2 *bp)
	u32 bmcr, local_adv, remote_adv, common;

	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
		bp->duplex = DUPLEX_HALF;

	if (!(bmcr & BMCR_ANENABLE)) {

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
			bp->duplex = DUPLEX_HALF;

bnx2_copper_linkup(struct bnx2 *bp)
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
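		/* The link partner's 1000BASE-T abilities in MII_STAT1000
		 * sit two bit positions above our own advertisement bits in
		 * MII_CTRL1000 (for example, LPA_1000FULL ==
		 * ADVERTISE_1000FULL << 2), so shifting the partner value
		 * right by 2 lines the two masks up for the AND above.
		 */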
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;

			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;

		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
			bp->line_speed = SPEED_10;
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
			bp->duplex = DUPLEX_HALF;

bnx2_init_rx_context0(struct bnx2 *bp)
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		else if (hi_water == 0)
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

bnx2_set_mac_link(struct bnx2 *bp)
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	switch (bp->line_speed) {
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			val |= BNX2_EMAC_MODE_PORT_MII_10M;
		val |= BNX2_EMAC_MODE_PORT_MII;
		val |= BNX2_EMAC_MODE_25G_MODE;
		val |= BNX2_EMAC_MODE_PORT_GMII;
	val |= BNX2_EMAC_MODE_PORT_GMII;

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_rx_context0(bp);

bnx2_enable_bmsr1(struct bnx2 *bp)
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);

bnx2_disable_bmsr1(struct bnx2 *bp)
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

bnx2_test_and_enable_2g5(struct bnx2 *bp)
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

bnx2_test_and_disable_2g5(struct bnx2 *bp)
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

bnx2_enable_forced_2g5(struct bnx2 *bp)
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

bnx2_disable_forced_2g5(struct bnx2 *bp)
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);

bnx2_set_link(struct bnx2 *bp)
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
			bmsr &= ~BMSR_LSTATUS;

	if (bmsr & BMSR_LSTATUS) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
			bnx2_copper_linkup(bp);
		bnx2_resolve_flow_ctrl(bp);

		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
bnx2_reset_phy(struct bnx2 *bp)
	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
	if (i == PHY_RESET_MAX_WAIT) {
bnx2_phy_get_pause_adv(struct bnx2 *bp)
	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
			adv = ADVERTISE_PAUSE_CAP;
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
			adv = ADVERTISE_PAUSE_ASYM;
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

static int bnx2_fw_sync(struct bnx2 *, u32, int);

bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			bnx2_write_phy(bp, bp->mii_adv, adv &
				       ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF));
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
				       BMCR_ANRESTART | BMCR_ANENABLE);

			netif_carrier_off(bp->dev);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
			bnx2_report_link(bp);
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);
		spin_lock_bh(&bp->phy_lock);

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	bnx2_resolve_flow_ctrl(bp);
	bnx2_set_mac_link(bp);

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

bnx2_set_default_remote_link(struct bnx2 *bp)
	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;

		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;

bnx2_set_default_link(struct bnx2 *bp)
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;

bnx2_send_heart_beat(struct bnx2 *bp)
	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);

bnx2_remote_phy_event(struct bnx2 *bp)
	u8 link_up = bp->link_up;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)

		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;

		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);

bnx2_set_remote_link(struct bnx2 *bp)
	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		bnx2_send_heart_beat(bp);

bnx2_setup_copper_phy(struct bnx2 *bp)
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {
			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);

	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	if (new_bmcr != bmcr) {
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);

bnx2_setup_phy(struct bnx2 *bp, u8 port)
	if (bp->loopback == MAC_LOOPBACK)

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
		return (bnx2_setup_copper_phy(bp));

bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
	if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_DIG);

bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);

bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));

bnx2_init_phy(struct bnx2 *bp, int reset_phy)
	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
		rc = bnx2_init_copper_phy(bp, reset_phy);

	rc = bnx2_setup_phy(bp, bp->phy_port);

bnx2_set_mac_loopback(struct bnx2 *bp)
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);

static int bnx2_test_link(struct bnx2 *);

bnx2_set_phy_loopback(struct bnx2 *bp)
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
	spin_unlock_bh(&bp->phy_lock);

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);

bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))

	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
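	/* The firmware echoes the driver's sequence number in the ACK field
	 * of its mailbox, so comparing the sequence bits is enough to tell
	 * that this particular request, and not an older one, has been seen.
	 */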
	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		printk(KERN_ERR PFX "fw sync timeout, reset code = "

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)

bnx2_init_5709_context(struct bnx2 *bp)
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
	if (val & BNX2_CTX_COMMAND_MEM_INIT)

	for (i = 0; i < bp->ctx_pages; i++) {
		memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {

bnx2_init_context(struct bnx2 *bp)
	u32 vcid_addr, pcid_addr, offset;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		vcid_addr = GET_PCID_ADDR(vcid);

		new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);

		pcid_addr = GET_PCID_ADDR(new_vcid);
		vcid_addr = GET_CID_ADDR(vcid);
		pcid_addr = vcid_addr;

	for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
		vcid_addr += (i << PHY_CTX_SHIFT);
		pcid_addr += (i << PHY_CTX_SHIFT);

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
			bnx2_ctx_wr(bp, vcid_addr, offset, 0);

bnx2_alloc_bad_rbuf(struct bnx2 *bp)
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;
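		/* Presumably the free register takes the mbuf cluster number
		 * in two fields, a high field starting at bit 9 plus the low
		 * bits, with bit 0 acting as the request flag; that would
		 * explain the (val << 9) | val | 1 encoding above.
		 */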
		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);

bnx2_set_mac_addr(struct bnx2 *bp)
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
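/* Worked example: for a dev_addr of 00:10:18:2a:3b:4c (an arbitrary
 * illustration), the writes above are MAC_MATCH0 = 0x00000010 (bytes 0-1)
 * and MAC_MATCH1 = 0x182a3b4c (bytes 2-5).
 */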
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

bnx2_free_rx_page(struct bnx2 *bp, u16 index)
	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
		       PCI_DMA_FROMDEVICE);

bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
	struct status_block *sblk = bnapi->status_blk;
	u32 new_link_state, old_link_state;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2455 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2457 spin_lock(&bp->phy_lock);
2459 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2461 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2462 bnx2_set_remote_link(bp);
2464 spin_unlock(&bp->phy_lock);
2469 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2473 if (bnapi->int_num == 0)
2474 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2476 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
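/* The last slot of each ring page holds a link BD rather than a real
 * descriptor, so a hardware index whose low bits are all ones is
 * stepped past that slot before use; e.g. with 256 slots per page,
 * index 255 advances to 256.
 */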
2478 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2484 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2486 u16 hw_cons, sw_cons, sw_ring_cons;
2489 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2490 sw_cons = bnapi->tx_cons;
2492 while (sw_cons != hw_cons) {
2493 struct sw_bd *tx_buf;
2494 struct sk_buff *skb;
2497 sw_ring_cons = TX_RING_IDX(sw_cons);
2499 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2502 /* partial BD completions possible with TSO packets */
2503 if (skb_is_gso(skb)) {
2504 u16 last_idx, last_ring_idx;
2506 last_idx = sw_cons +
2507 skb_shinfo(skb)->nr_frags + 1;
2508 last_ring_idx = sw_ring_cons +
2509 skb_shinfo(skb)->nr_frags + 1;
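/* last_idx may wrap past 0xffff, so the check below relies on a
 * 16-bit signed difference.  Example: if hw_cons = 0xfff0 and
 * last_idx has wrapped to 0x0005, (s16)(0x0005 - 0xfff0) = 21 > 0,
 * i.e. the packet's last BD has not been completed yet.
 */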
2510 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2513 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2518 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2519 skb_headlen(skb), PCI_DMA_TODEVICE);
2522 last = skb_shinfo(skb)->nr_frags;
2524 for (i = 0; i < last; i++) {
2525 sw_cons = NEXT_TX_BD(sw_cons);
2527 pci_unmap_page(bp->pdev,
2529 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2531 skb_shinfo(skb)->frags[i].size,
2535 sw_cons = NEXT_TX_BD(sw_cons);
2539 if (tx_pkt == budget)
2542 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2545 bnapi->hw_tx_cons = hw_cons;
2546 bnapi->tx_cons = sw_cons;
2547 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2548 * before checking for netif_queue_stopped(). Without the
2549 * memory barrier, there is a small possibility that bnx2_start_xmit()
2550 * will miss it and cause the queue to be stopped forever.
2551 */
2552 smp_mb();
2554 if (unlikely(netif_queue_stopped(bp->dev)) &&
2555 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2556 netif_tx_lock(bp->dev);
2557 if ((netif_queue_stopped(bp->dev)) &&
2558 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2559 netif_wake_queue(bp->dev);
2560 netif_tx_unlock(bp->dev);
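/* The queue-stopped test is intentionally done twice: the unlocked
 * test above is a cheap fast path, and the test is repeated under
 * netif_tx_lock so that a concurrent bnx2_start_xmit() cannot stop
 * the queue between the check and the wake, which would leave the
 * queue stopped forever.
 */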
2566 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2567 struct sk_buff *skb, int count)
2569 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2570 struct rx_bd *cons_bd, *prod_bd;
2573 u16 hw_prod = bnapi->rx_pg_prod, prod;
2574 u16 cons = bnapi->rx_pg_cons;
2576 for (i = 0; i < count; i++) {
2577 prod = RX_PG_RING_IDX(hw_prod);
2579 prod_rx_pg = &bp->rx_pg_ring[prod];
2580 cons_rx_pg = &bp->rx_pg_ring[cons];
2581 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2582 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2584 if (i == 0 && skb) {
2586 struct skb_shared_info *shinfo;
2588 shinfo = skb_shinfo(skb);
2590 page = shinfo->frags[shinfo->nr_frags].page;
2591 shinfo->frags[shinfo->nr_frags].page = NULL;
2592 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2593 PCI_DMA_FROMDEVICE);
2594 cons_rx_pg->page = page;
2595 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2599 prod_rx_pg->page = cons_rx_pg->page;
2600 cons_rx_pg->page = NULL;
2601 pci_unmap_addr_set(prod_rx_pg, mapping,
2602 pci_unmap_addr(cons_rx_pg, mapping));
2604 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2605 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2608 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2609 hw_prod = NEXT_RX_BD(hw_prod);
2611 bnapi->rx_pg_prod = hw_prod;
2612 bnapi->rx_pg_cons = cons;
2616 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2619 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2620 struct rx_bd *cons_bd, *prod_bd;
2622 cons_rx_buf = &bp->rx_buf_ring[cons];
2623 prod_rx_buf = &bp->rx_buf_ring[prod];
2625 pci_dma_sync_single_for_device(bp->pdev,
2626 pci_unmap_addr(cons_rx_buf, mapping),
2627 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2629 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2631 prod_rx_buf->skb = skb;
2636 pci_unmap_addr_set(prod_rx_buf, mapping,
2637 pci_unmap_addr(cons_rx_buf, mapping));
2639 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2640 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2641 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2642 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2646 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2647 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2651 u16 prod = ring_idx & 0xffff;
2653 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2654 if (unlikely(err)) {
2655 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2657 unsigned int raw_len = len + 4;
2658 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2660 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
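/* Example with hypothetical numbers: for a 9000-byte frame with
 * hdr_len = 256 and 4096-byte pages, raw_len = 9004 and
 * PAGE_ALIGN(9004 - 256) >> PAGE_SHIFT = 3 page buffers are recycled
 * here along with the head skb.
 */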
2665 skb_reserve(skb, BNX2_RX_OFFSET);
2666 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2667 PCI_DMA_FROMDEVICE);
2673 unsigned int i, frag_len, frag_size, pages;
2674 struct sw_pg *rx_pg;
2675 u16 pg_cons = bnapi->rx_pg_cons;
2676 u16 pg_prod = bnapi->rx_pg_prod;
2678 frag_size = len + 4 - hdr_len;
2679 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2680 skb_put(skb, hdr_len);
2682 for (i = 0; i < pages; i++) {
2683 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2684 if (unlikely(frag_len <= 4)) {
2685 unsigned int tail = 4 - frag_len;
2687 bnapi->rx_pg_cons = pg_cons;
2688 bnapi->rx_pg_prod = pg_prod;
2689 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2696 &skb_shinfo(skb)->frags[i - 1];
2698 skb->data_len -= tail;
2699 skb->truesize -= tail;
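/* frag_size includes the trailing 4-byte frame CRC (the "+ 4"
 * above), so a final fragment of 4 bytes or less is entirely CRC.
 * Example: frag_len = 2 means the last page holds 2 CRC bytes and
 * tail = 2 stale CRC bytes must be trimmed from the skb lengths.
 */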
2703 rx_pg = &bp->rx_pg_ring[pg_cons];
2705 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2706 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2711 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2714 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2715 if (unlikely(err)) {
2716 bnapi->rx_pg_cons = pg_cons;
2717 bnapi->rx_pg_prod = pg_prod;
2718 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2723 frag_size -= frag_len;
2724 skb->data_len += frag_len;
2725 skb->truesize += frag_len;
2726 skb->len += frag_len;
2728 pg_prod = NEXT_RX_BD(pg_prod);
2729 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2731 bnapi->rx_pg_prod = pg_prod;
2732 bnapi->rx_pg_cons = pg_cons;
2738 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2740 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2742 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2748 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2750 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2751 struct l2_fhdr *rx_hdr;
2752 int rx_pkt = 0, pg_ring_used = 0;
2754 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2755 sw_cons = bnapi->rx_cons;
2756 sw_prod = bnapi->rx_prod;
2758 /* Memory barrier necessary as speculative reads of the rx
2759 * buffer can be ahead of the index in the status block
2760 */
2761 rmb();
2762 while (sw_cons != hw_cons) {
2763 unsigned int len, hdr_len;
2765 struct sw_bd *rx_buf;
2766 struct sk_buff *skb;
2767 dma_addr_t dma_addr;
2769 sw_ring_cons = RX_RING_IDX(sw_cons);
2770 sw_ring_prod = RX_RING_IDX(sw_prod);
2772 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2777 dma_addr = pci_unmap_addr(rx_buf, mapping);
2779 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2780 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2781 PCI_DMA_FROMDEVICE);
2783 rx_hdr = (struct l2_fhdr *) skb->data;
2784 len = rx_hdr->l2_fhdr_pkt_len;
2786 if ((status = rx_hdr->l2_fhdr_status) &
2787 (L2_FHDR_ERRORS_BAD_CRC |
2788 L2_FHDR_ERRORS_PHY_DECODE |
2789 L2_FHDR_ERRORS_ALIGNMENT |
2790 L2_FHDR_ERRORS_TOO_SHORT |
2791 L2_FHDR_ERRORS_GIANT_FRAME)) {
2793 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2798 if (status & L2_FHDR_STATUS_SPLIT) {
2799 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2801 } else if (len > bp->rx_jumbo_thresh) {
2802 hdr_len = bp->rx_jumbo_thresh;
2808 if (len <= bp->rx_copy_thresh) {
2809 struct sk_buff *new_skb;
2811 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2812 if (new_skb == NULL) {
2813 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2819 skb_copy_from_linear_data_offset(skb,
2821 new_skb->data, len + 2);
2822 skb_reserve(new_skb, 2);
2823 skb_put(new_skb, len);
2825 bnx2_reuse_rx_skb(bp, bnapi, skb,
2826 sw_ring_cons, sw_ring_prod);
2829 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2830 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2833 skb->protocol = eth_type_trans(skb, bp->dev);
2835 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2836 (ntohs(skb->protocol) != 0x8100)) {
2843 skb->ip_summed = CHECKSUM_NONE;
2845 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2846 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2848 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2849 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2850 skb->ip_summed = CHECKSUM_UNNECESSARY;
2854 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2855 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2856 rx_hdr->l2_fhdr_vlan_tag);
2860 netif_receive_skb(skb);
2862 bp->dev->last_rx = jiffies;
2866 sw_cons = NEXT_RX_BD(sw_cons);
2867 sw_prod = NEXT_RX_BD(sw_prod);
2869 if (rx_pkt == budget)
2872 /* Refresh hw_cons to see if there is new work */
2873 if (sw_cons == hw_cons) {
2874 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2878 bnapi->rx_cons = sw_cons;
2879 bnapi->rx_prod = sw_prod;
2882 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2885 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2887 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2895 /* MSI ISR - The only difference between this and the INTx ISR
2896 * is that the MSI interrupt is always serviced.
2897 */
2899 bnx2_msi(int irq, void *dev_instance)
2901 struct net_device *dev = dev_instance;
2902 struct bnx2 *bp = netdev_priv(dev);
2903 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2905 prefetch(bnapi->status_blk);
2906 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2907 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2908 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2910 /* Return here if interrupt is disabled. */
2911 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2914 netif_rx_schedule(dev, &bnapi->napi);
2920 bnx2_msi_1shot(int irq, void *dev_instance)
2922 struct net_device *dev = dev_instance;
2923 struct bnx2 *bp = netdev_priv(dev);
2924 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2926 prefetch(bnapi->status_blk);
2928 /* Return here if interrupt is disabled. */
2929 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2932 netif_rx_schedule(dev, &bnapi->napi);
2938 bnx2_interrupt(int irq, void *dev_instance)
2940 struct net_device *dev = dev_instance;
2941 struct bnx2 *bp = netdev_priv(dev);
2942 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2943 struct status_block *sblk = bnapi->status_blk;
2945 /* When using INTx, it is possible for the interrupt to arrive
2946 * at the CPU before the status block posted prior to the
2947 * interrupt. Reading a register will flush the status block.
2948 * When using MSI, the MSI message will always complete after
2949 * the status block write.
2950 */
2951 if ((sblk->status_idx == bnapi->last_status_idx) &&
2952 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2953 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2956 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2957 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2958 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2960 /* Read back to deassert IRQ immediately to avoid too many
2961 * spurious interrupts.
2962 */
2963 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2965 /* Return here if interrupt is shared and is disabled. */
2966 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2969 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2970 bnapi->last_status_idx = sblk->status_idx;
2971 __netif_rx_schedule(dev, &bnapi->napi);
2978 bnx2_tx_msix(int irq, void *dev_instance)
2980 struct net_device *dev = dev_instance;
2981 struct bnx2 *bp = netdev_priv(dev);
2982 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2984 prefetch(bnapi->status_blk_msix);
2986 /* Return here if interrupt is disabled. */
2987 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2990 netif_rx_schedule(dev, &bnapi->napi);
2994 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2995 STATUS_ATTN_BITS_TIMER_ABORT)
2998 bnx2_has_work(struct bnx2_napi *bnapi)
3000 struct status_block *sblk = bnapi->status_blk;
3002 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
3003 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
3006 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3007 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3013 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
3015 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3016 struct bnx2 *bp = bnapi->bp;
3018 struct status_block_msix *sblk = bnapi->status_blk_msix;
3021 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
3022 if (unlikely(work_done >= budget))
3025 bnapi->last_status_idx = sblk->status_idx;
3027 } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
3029 netif_rx_complete(bp->dev, napi);
3030 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3031 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3032 bnapi->last_status_idx);
3036 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3037 int work_done, int budget)
3039 struct status_block *sblk = bnapi->status_blk;
3040 u32 status_attn_bits = sblk->status_attn_bits;
3041 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3043 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3044 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3046 bnx2_phy_int(bp, bnapi);
3048 /* This is needed to take care of transient status
3049 * during link changes.
3050 */
3051 REG_WR(bp, BNX2_HC_COMMAND,
3052 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3053 REG_RD(bp, BNX2_HC_COMMAND);
3056 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
3057 bnx2_tx_int(bp, bnapi, 0);
3059 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
3060 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3065 static int bnx2_poll(struct napi_struct *napi, int budget)
3067 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3068 struct bnx2 *bp = bnapi->bp;
3070 struct status_block *sblk = bnapi->status_blk;
3073 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3075 if (unlikely(work_done >= budget))
3078 /* bnapi->last_status_idx is used below to tell the hw how
3079 * much work has been processed, so we must read it before
3080 * checking for more work.
3081 */
3082 bnapi->last_status_idx = sblk->status_idx;
3084 if (likely(!bnx2_has_work(bnapi))) {
3085 netif_rx_complete(bp->dev, napi);
3086 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3087 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3088 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3089 bnapi->last_status_idx);
3092 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3093 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3094 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3095 bnapi->last_status_idx);
3097 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3098 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3099 bnapi->last_status_idx);
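/* In the INTx case the ack is written twice: first with MASK_INT
 * still set to post the new index while the line stays masked, then
 * without MASK_INT to re-enable it.  One write is enough for
 * MSI/MSI-X because the message cannot pass the status block DMA.
 */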
3107 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3108 * from set_multicast.
3109 */
3111 bnx2_set_rx_mode(struct net_device *dev)
3113 struct bnx2 *bp = netdev_priv(dev);
3114 u32 rx_mode, sort_mode;
3117 spin_lock_bh(&bp->phy_lock);
3119 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3120 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3121 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3123 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3124 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3126 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3127 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3129 if (dev->flags & IFF_PROMISC) {
3130 /* Promiscuous mode. */
3131 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3132 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3133 BNX2_RPM_SORT_USER0_PROM_VLAN;
3135 else if (dev->flags & IFF_ALLMULTI) {
3136 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3137 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3140 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3143 /* Accept one or more multicast addresses. */
3144 struct dev_mc_list *mclist;
3145 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3150 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3152 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3153 i++, mclist = mclist->next) {
3155 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3157 regidx = (bit & 0xe0) >> 5;
3159 mc_filter[regidx] |= (1 << bit);
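/* Example, assuming bit holds the low 8 bits of the CRC: those 8
 * bits address one of 256 filter bits, the top 3 selecting one of
 * the 8 hash registers and the low 5 the bit within it.  For
 * bit = 0xa7: regidx = 0xa0 >> 5 = 5, 0xa7 & 0x1f = 7, so
 * mc_filter[5] |= 0x80.
 */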
3162 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3163 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3167 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3170 if (rx_mode != bp->rx_mode) {
3171 bp->rx_mode = rx_mode;
3172 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3175 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3176 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3177 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3179 spin_unlock_bh(&bp->phy_lock);
3183 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3189 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3190 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3191 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3192 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3193 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3196 for (i = 0; i < rv2p_code_len; i += 8) {
3197 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3199 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3202 if (rv2p_proc == RV2P_PROC1) {
3203 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3204 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3207 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3208 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3212 /* Reset the processor; the un-stall is done later. */
3213 if (rv2p_proc == RV2P_PROC1) {
3214 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3217 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3222 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3229 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3230 val |= cpu_reg->mode_value_halt;
3231 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3232 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3234 /* Load the Text area. */
3235 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3239 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3244 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3245 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3249 /* Load the Data area. */
3250 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3254 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3255 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3259 /* Load the SBSS area. */
3260 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3264 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3265 bnx2_reg_wr_ind(bp, offset, 0);
3269 /* Load the BSS area. */
3270 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3274 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3275 bnx2_reg_wr_ind(bp, offset, 0);
3279 /* Load the Read-Only area. */
3280 offset = cpu_reg->spad_base +
3281 (fw->rodata_addr - cpu_reg->mips_view_base);
3285 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3286 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3290 /* Clear the pre-fetch instruction. */
3291 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3292 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3294 /* Start the CPU. */
3295 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3296 val &= ~cpu_reg->mode_value_halt;
3297 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3298 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3304 bnx2_init_cpus(struct bnx2 *bp)
3310 /* Initialize the RV2P processor. */
3311 text = vmalloc(FW_BUF_SIZE);
3314 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3315 rv2p = bnx2_xi_rv2p_proc1;
3316 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3318 rv2p = bnx2_rv2p_proc1;
3319 rv2p_len = sizeof(bnx2_rv2p_proc1);
3321 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3325 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3327 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3328 rv2p = bnx2_xi_rv2p_proc2;
3329 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3331 rv2p = bnx2_rv2p_proc2;
3332 rv2p_len = sizeof(bnx2_rv2p_proc2);
3334 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3338 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3340 /* Initialize the RX Processor. */
3341 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3342 fw = &bnx2_rxp_fw_09;
3344 fw = &bnx2_rxp_fw_06;
3347 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3351 /* Initialize the TX Processor. */
3352 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3353 fw = &bnx2_txp_fw_09;
3355 fw = &bnx2_txp_fw_06;
3358 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3362 /* Initialize the TX Patch-up Processor. */
3363 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3364 fw = &bnx2_tpat_fw_09;
3366 fw = &bnx2_tpat_fw_06;
3369 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3373 /* Initialize the Completion Processor. */
3374 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3375 fw = &bnx2_com_fw_09;
3377 fw = &bnx2_com_fw_06;
3380 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3384 /* Initialize the Command Processor. */
3385 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3386 fw = &bnx2_cp_fw_09;
3388 fw = &bnx2_cp_fw_06;
3391 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3399 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3403 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3409 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3410 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3411 PCI_PM_CTRL_PME_STATUS);
3413 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3414 /* delay required during transition out of D3hot */
3417 val = REG_RD(bp, BNX2_EMAC_MODE);
3418 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3419 val &= ~BNX2_EMAC_MODE_MPKT;
3420 REG_WR(bp, BNX2_EMAC_MODE, val);
3422 val = REG_RD(bp, BNX2_RPM_CONFIG);
3423 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3424 REG_WR(bp, BNX2_RPM_CONFIG, val);
3435 autoneg = bp->autoneg;
3436 advertising = bp->advertising;
3438 if (bp->phy_port == PORT_TP) {
3439 bp->autoneg = AUTONEG_SPEED;
3440 bp->advertising = ADVERTISED_10baseT_Half |
3441 ADVERTISED_10baseT_Full |
3442 ADVERTISED_100baseT_Half |
3443 ADVERTISED_100baseT_Full |
3447 spin_lock_bh(&bp->phy_lock);
3448 bnx2_setup_phy(bp, bp->phy_port);
3449 spin_unlock_bh(&bp->phy_lock);
3451 bp->autoneg = autoneg;
3452 bp->advertising = advertising;
3454 bnx2_set_mac_addr(bp);
3456 val = REG_RD(bp, BNX2_EMAC_MODE);
3458 /* Enable port mode. */
3459 val &= ~BNX2_EMAC_MODE_PORT;
3460 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3461 BNX2_EMAC_MODE_ACPI_RCVD |
3462 BNX2_EMAC_MODE_MPKT;
3463 if (bp->phy_port == PORT_TP)
3464 val |= BNX2_EMAC_MODE_PORT_MII;
3466 val |= BNX2_EMAC_MODE_PORT_GMII;
3467 if (bp->line_speed == SPEED_2500)
3468 val |= BNX2_EMAC_MODE_25G_MODE;
3471 REG_WR(bp, BNX2_EMAC_MODE, val);
3473 /* receive all multicast */
3474 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3475 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3478 REG_WR(bp, BNX2_EMAC_RX_MODE,
3479 BNX2_EMAC_RX_MODE_SORT_MODE);
3481 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3482 BNX2_RPM_SORT_USER0_MC_EN;
3483 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3484 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3485 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3486 BNX2_RPM_SORT_USER0_ENA);
3488 /* Need to enable EMAC and RPM for WOL. */
3489 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3490 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3491 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3492 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3494 val = REG_RD(bp, BNX2_RPM_CONFIG);
3495 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3496 REG_WR(bp, BNX2_RPM_CONFIG, val);
3498 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3501 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3504 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3505 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3507 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3508 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3509 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3518 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3520 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3523 /* No more memory access after this point until
3524 * device is brought back to D0.
3536 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3541 /* Request access to the flash interface. */
3542 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3543 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3544 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3545 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3551 if (j >= NVRAM_TIMEOUT_COUNT)
3558 bnx2_release_nvram_lock(struct bnx2 *bp)
3563 /* Relinquish nvram interface. */
3564 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3566 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3567 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3568 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3574 if (j >= NVRAM_TIMEOUT_COUNT)
3582 bnx2_enable_nvram_write(struct bnx2 *bp)
3586 val = REG_RD(bp, BNX2_MISC_CFG);
3587 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3589 if (bp->flash_info->flags & BNX2_NV_WREN) {
3592 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3593 REG_WR(bp, BNX2_NVM_COMMAND,
3594 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3596 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3599 val = REG_RD(bp, BNX2_NVM_COMMAND);
3600 if (val & BNX2_NVM_COMMAND_DONE)
3604 if (j >= NVRAM_TIMEOUT_COUNT)
3611 bnx2_disable_nvram_write(struct bnx2 *bp)
3615 val = REG_RD(bp, BNX2_MISC_CFG);
3616 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3621 bnx2_enable_nvram_access(struct bnx2 *bp)
3625 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3626 /* Enable both bits, even on read. */
3627 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3628 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3632 bnx2_disable_nvram_access(struct bnx2 *bp)
3636 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3637 /* Disable both bits, even after read. */
3638 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3639 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3640 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3644 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3649 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3650 /* Buffered flash, no erase needed */
3653 /* Build an erase command */
3654 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3655 BNX2_NVM_COMMAND_DOIT;
3657 /* Need to clear DONE bit separately. */
3658 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3660 /* Address of the NVRAM to erase. */
3661 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3663 /* Issue an erase command. */
3664 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3666 /* Wait for completion. */
3667 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3672 val = REG_RD(bp, BNX2_NVM_COMMAND);
3673 if (val & BNX2_NVM_COMMAND_DONE)
3677 if (j >= NVRAM_TIMEOUT_COUNT)
3684 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3689 /* Build the command word. */
3690 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3692 /* Translate the offset into the buffered flash's page/byte addressing; not needed for the 5709. */
3693 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3694 offset = ((offset / bp->flash_info->page_size) <<
3695 bp->flash_info->page_bits) +
3696 (offset % bp->flash_info->page_size);
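/* Example with hypothetical flash geometry (page_size = 264,
 * page_bits = 9): offset 1000 maps to
 * (1000 / 264) << 9 | (1000 % 264) = (3 << 9) + 208 = 1744,
 * i.e. page 3, byte 208 within the page.
 */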
3699 /* Need to clear DONE bit separately. */
3700 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3702 /* Address of the NVRAM to read from. */
3703 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3705 /* Issue a read command. */
3706 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3708 /* Wait for completion. */
3709 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3714 val = REG_RD(bp, BNX2_NVM_COMMAND);
3715 if (val & BNX2_NVM_COMMAND_DONE) {
3716 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3717 memcpy(ret_val, &v, 4);
3721 if (j >= NVRAM_TIMEOUT_COUNT)
3729 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3735 /* Build the command word. */
3736 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3738 /* Translate the offset into the buffered flash's page/byte addressing; not needed for the 5709. */
3739 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3740 offset = ((offset / bp->flash_info->page_size) <<
3741 bp->flash_info->page_bits) +
3742 (offset % bp->flash_info->page_size);
3745 /* Need to clear DONE bit separately. */
3746 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3748 memcpy(&val32, val, 4);
3750 /* Write the data. */
3751 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3753 /* Address of the NVRAM to write to. */
3754 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3756 /* Issue the write command. */
3757 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3759 /* Wait for completion. */
3760 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3763 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3766 if (j >= NVRAM_TIMEOUT_COUNT)
3773 bnx2_init_nvram(struct bnx2 *bp)
3776 int j, entry_count, rc = 0;
3777 struct flash_spec *flash;
3779 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3780 bp->flash_info = &flash_5709;
3781 goto get_flash_size;
3784 /* Determine the selected interface. */
3785 val = REG_RD(bp, BNX2_NVM_CFG1);
3787 entry_count = ARRAY_SIZE(flash_table);
3789 if (val & 0x40000000) {
3791 /* Flash interface has been reconfigured */
3792 for (j = 0, flash = &flash_table[0]; j < entry_count;
3794 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3795 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3796 bp->flash_info = flash;
3803 /* Flash interface has not yet been reconfigured */
3805 if (val & (1 << 23))
3806 mask = FLASH_BACKUP_STRAP_MASK;
3808 mask = FLASH_STRAP_MASK;
3810 for (j = 0, flash = &flash_table[0]; j < entry_count;
3813 if ((val & mask) == (flash->strapping & mask)) {
3814 bp->flash_info = flash;
3816 /* Request access to the flash interface. */
3817 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3820 /* Enable access to flash interface */
3821 bnx2_enable_nvram_access(bp);
3823 /* Reconfigure the flash interface */
3824 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3825 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3826 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3827 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3829 /* Disable access to flash interface */
3830 bnx2_disable_nvram_access(bp);
3831 bnx2_release_nvram_lock(bp);
3836 } /* if (val & 0x40000000) */
3838 if (j == entry_count) {
3839 bp->flash_info = NULL;
3840 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3845 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3846 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3848 bp->flash_size = val;
3850 bp->flash_size = bp->flash_info->total_size;
3856 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3860 u32 cmd_flags, offset32, len32, extra;
3865 /* Request access to the flash interface. */
3866 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3869 /* Enable access to flash interface */
3870 bnx2_enable_nvram_access(bp);
3883 pre_len = 4 - (offset & 3);
3885 if (pre_len >= len32) {
3887 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3888 BNX2_NVM_COMMAND_LAST;
3891 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3894 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3899 memcpy(ret_buf, buf + (offset & 3), pre_len);
3906 extra = 4 - (len32 & 3);
3907 len32 = (len32 + 4) & ~3;
3914 cmd_flags = BNX2_NVM_COMMAND_LAST;
3916 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3917 BNX2_NVM_COMMAND_LAST;
3919 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3921 memcpy(ret_buf, buf, 4 - extra);
3923 else if (len32 > 0) {
3926 /* Read the first word. */
3930 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3932 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3934 /* Advance to the next dword. */
3939 while (len32 > 4 && rc == 0) {
3940 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3942 /* Advance to the next dword. */
3951 cmd_flags = BNX2_NVM_COMMAND_LAST;
3952 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3954 memcpy(ret_buf, buf, 4 - extra);
3957 /* Disable access to flash interface */
3958 bnx2_disable_nvram_access(bp);
3960 bnx2_release_nvram_lock(bp);
3966 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3969 u32 written, offset32, len32;
3970 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3972 int align_start, align_end;
3977 align_start = align_end = 0;
3979 if ((align_start = (offset32 & 3))) {
3981 len32 += align_start;
3984 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3989 align_end = 4 - (len32 & 3);
3991 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3995 if (align_start || align_end) {
3996 align_buf = kmalloc(len32, GFP_KERNEL);
3997 if (align_buf == NULL)
4000 memcpy(align_buf, start, 4);
4003 memcpy(align_buf + len32 - 4, end, 4);
4005 memcpy(align_buf + align_start, data_buf, buf_size);
4009 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4010 flash_buffer = kmalloc(264, GFP_KERNEL);
4011 if (flash_buffer == NULL) {
4013 goto nvram_write_end;
4018 while ((written < len32) && (rc == 0)) {
4019 u32 page_start, page_end, data_start, data_end;
4020 u32 addr, cmd_flags;
4023 /* Find the page_start addr */
4024 page_start = offset32 + written;
4025 page_start -= (page_start % bp->flash_info->page_size);
4026 /* Find the page_end addr */
4027 page_end = page_start + bp->flash_info->page_size;
4028 /* Find the data_start addr */
4029 data_start = (written == 0) ? offset32 : page_start;
4030 /* Find the data_end addr */
4031 data_end = (page_end > offset32 + len32) ?
4032 (offset32 + len32) : page_end;
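/* Example with a hypothetical page_size of 256, offset32 = 300,
 * len32 = 520, written = 0: page_start = 256, page_end = 512,
 * data_start = 300, data_end = 512; this pass rewrites the page
 * spanning bytes 256-511 and the next pass resumes at 512.
 */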
4034 /* Request access to the flash interface. */
4035 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4036 goto nvram_write_end;
4038 /* Enable access to flash interface */
4039 bnx2_enable_nvram_access(bp);
4041 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4042 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4045 /* Read the whole page into the buffer
4046 * (non-buffer flash only) */
4047 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4048 if (j == (bp->flash_info->page_size - 4)) {
4049 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4051 rc = bnx2_nvram_read_dword(bp,
4057 goto nvram_write_end;
4063 /* Enable writes to flash interface (unlock write-protect) */
4064 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4065 goto nvram_write_end;
4067 /* Loop to write back the buffer data from page_start to
4068 * data_start */
4069 i = 0;
4070 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4071 /* Erase the page */
4072 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4073 goto nvram_write_end;
4075 /* Re-enable the write again for the actual write */
4076 bnx2_enable_nvram_write(bp);
4078 for (addr = page_start; addr < data_start;
4079 addr += 4, i += 4) {
4081 rc = bnx2_nvram_write_dword(bp, addr,
4082 &flash_buffer[i], cmd_flags);
4085 goto nvram_write_end;
4091 /* Loop to write the new data from data_start to data_end */
4092 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4093 if ((addr == page_end - 4) ||
4094 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4095 (addr == data_end - 4))) {
4097 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4099 rc = bnx2_nvram_write_dword(bp, addr, buf,
4103 goto nvram_write_end;
4109 /* Loop to write back the buffer data from data_end
4110 * to page_end */
4111 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4112 for (addr = data_end; addr < page_end;
4113 addr += 4, i += 4) {
4115 if (addr == page_end-4) {
4116 cmd_flags = BNX2_NVM_COMMAND_LAST;
4118 rc = bnx2_nvram_write_dword(bp, addr,
4119 &flash_buffer[i], cmd_flags);
4122 goto nvram_write_end;
4128 /* Disable writes to flash interface (lock write-protect) */
4129 bnx2_disable_nvram_write(bp);
4131 /* Disable access to flash interface */
4132 bnx2_disable_nvram_access(bp);
4133 bnx2_release_nvram_lock(bp);
4135 /* Increment written */
4136 written += data_end - data_start;
4140 kfree(flash_buffer);
4146 bnx2_init_remote_phy(struct bnx2 *bp)
4150 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4151 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4154 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4155 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4158 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4159 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4161 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4162 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4163 bp->phy_port = PORT_FIBRE;
4165 bp->phy_port = PORT_TP;
4167 if (netif_running(bp->dev)) {
4170 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4171 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4172 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4178 bnx2_setup_msix_tbl(struct bnx2 *bp)
4180 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4182 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4183 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4187 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4193 /* Wait for the current PCI transaction to complete before
4194 * issuing a reset. */
4195 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4196 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4197 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4198 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4199 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4200 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4203 /* Wait for the firmware to tell us it is ok to issue a reset. */
4204 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4206 /* Deposit a driver reset signature so the firmware knows that
4207 * this is a soft reset. */
4208 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4209 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4211 /* Do a dummy read to force the chip to complete all current transactions
4212 * before we issue a reset. */
4213 val = REG_RD(bp, BNX2_MISC_ID);
4215 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4216 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4217 REG_RD(bp, BNX2_MISC_COMMAND);
4220 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4221 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4223 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4226 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4227 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4228 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4231 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4233 /* Reading back any register after chip reset will hang the
4234 * bus on 5706 A0 and A1. The msleep below provides plenty
4235 * of margin for write posting.
4236 */
4237 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4238 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4241 /* Reset takes approximately 30 usec */
4242 for (i = 0; i < 10; i++) {
4243 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4244 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4245 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4250 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4251 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4252 printk(KERN_ERR PFX "Chip reset did not complete\n");
4257 /* Make sure byte swapping is properly configured. */
4258 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4259 if (val != 0x01020304) {
4260 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4264 /* Wait for the firmware to finish its initialization. */
4265 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4269 spin_lock_bh(&bp->phy_lock);
4270 old_port = bp->phy_port;
4271 bnx2_init_remote_phy(bp);
4272 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4273 old_port != bp->phy_port)
4274 bnx2_set_default_remote_link(bp);
4275 spin_unlock_bh(&bp->phy_lock);
4277 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4278 /* Adjust the voltage regulator to two steps lower. The default
4279 * of this register is 0x0000000e. */
4280 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4282 /* Remove bad rbuf memory from the free pool. */
4283 rc = bnx2_alloc_bad_rbuf(bp);
4286 if (bp->flags & BNX2_FLAG_USING_MSIX)
4287 bnx2_setup_msix_tbl(bp);
4293 bnx2_init_chip(struct bnx2 *bp)
4298 /* Make sure the interrupt is not active. */
4299 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4301 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4302 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4304 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4306 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4307 DMA_READ_CHANS << 12 |
4308 DMA_WRITE_CHANS << 16;
4310 val |= (0x2 << 20) | (1 << 11);
4312 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4315 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4316 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4317 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4319 REG_WR(bp, BNX2_DMA_CONFIG, val);
4321 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4322 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4323 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4324 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4327 if (bp->flags & BNX2_FLAG_PCIX) {
4330 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4332 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4333 val16 & ~PCI_X_CMD_ERO);
4336 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4337 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4338 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4339 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4341 /* Initialize context mapping and zero out the quick contexts. The
4342 * context block must have already been enabled. */
4343 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4344 rc = bnx2_init_5709_context(bp);
4348 bnx2_init_context(bp);
4350 if ((rc = bnx2_init_cpus(bp)) != 0)
4353 bnx2_init_nvram(bp);
4355 bnx2_set_mac_addr(bp);
4357 val = REG_RD(bp, BNX2_MQ_CONFIG);
4358 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4359 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4360 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4361 val |= BNX2_MQ_CONFIG_HALT_DIS;
4363 REG_WR(bp, BNX2_MQ_CONFIG, val);
4365 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4366 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4367 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4369 val = (BCM_PAGE_BITS - 8) << 24;
4370 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4372 /* Configure page size. */
4373 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4374 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4375 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4376 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4378 val = bp->mac_addr[0] +
4379 (bp->mac_addr[1] << 8) +
4380 (bp->mac_addr[2] << 16) +
4382 (bp->mac_addr[4] << 8) +
4383 (bp->mac_addr[5] << 16);
4384 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4386 /* Program the MTU. Also include 4 bytes for CRC32. */
4387 val = bp->dev->mtu + ETH_HLEN + 4;
4388 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4389 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4390 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4392 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4393 bp->bnx2_napi[i].last_status_idx = 0;
4395 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4397 /* Set up how to generate a link change interrupt. */
4398 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4400 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4401 (u64) bp->status_blk_mapping & 0xffffffff);
4402 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4404 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4405 (u64) bp->stats_blk_mapping & 0xffffffff);
4406 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4407 (u64) bp->stats_blk_mapping >> 32);
4409 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4410 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4412 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4413 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4415 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4416 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4418 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4420 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4422 REG_WR(bp, BNX2_HC_COM_TICKS,
4423 (bp->com_ticks_int << 16) | bp->com_ticks);
4425 REG_WR(bp, BNX2_HC_CMD_TICKS,
4426 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4428 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4429 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4431 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4432 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4434 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4435 val = BNX2_HC_CONFIG_COLLECT_STATS;
4437 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4438 BNX2_HC_CONFIG_COLLECT_STATS;
4441 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4442 u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4443 BNX2_HC_SB_CONFIG_1;
4445 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4446 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4449 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4450 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4452 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4453 (bp->tx_quick_cons_trip_int << 16) |
4454 bp->tx_quick_cons_trip);
4456 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4457 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4459 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4462 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4463 val |= BNX2_HC_CONFIG_ONE_SHOT;
4465 REG_WR(bp, BNX2_HC_CONFIG, val);
4467 /* Clear internal stats counters. */
4468 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4470 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4472 /* Initialize the receive filter. */
4473 bnx2_set_rx_mode(bp->dev);
4475 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4476 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4477 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4478 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4480 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4483 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4484 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4488 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4494 bnx2_clear_ring_states(struct bnx2 *bp)
4496 struct bnx2_napi *bnapi;
4499 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4500 bnapi = &bp->bnx2_napi[i];
4503 bnapi->hw_tx_cons = 0;
4504 bnapi->rx_prod_bseq = 0;
4507 bnapi->rx_pg_prod = 0;
4508 bnapi->rx_pg_cons = 0;
4513 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4515 u32 val, offset0, offset1, offset2, offset3;
4516 u32 cid_addr = GET_CID_ADDR(cid);
4518 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4519 offset0 = BNX2_L2CTX_TYPE_XI;
4520 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4521 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4522 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4524 offset0 = BNX2_L2CTX_TYPE;
4525 offset1 = BNX2_L2CTX_CMD_TYPE;
4526 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4527 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4529 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4530 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4532 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4533 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4535 val = (u64) bp->tx_desc_mapping >> 32;
4536 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4538 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4539 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4543 bnx2_init_tx_ring(struct bnx2 *bp)
4547 struct bnx2_napi *bnapi;
4550 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4552 bp->tx_vec = BNX2_TX_VEC;
4553 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4556 bnapi = &bp->bnx2_napi[bp->tx_vec];
4558 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4560 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4562 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4563 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4566 bp->tx_prod_bseq = 0;
4568 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4569 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4571 bnx2_init_tx_context(bp, cid);
4575 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4581 for (i = 0; i < num_rings; i++) {
4584 rxbd = &rx_ring[i][0];
4585 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4586 rxbd->rx_bd_len = buf_size;
4587 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4589 if (i == (num_rings - 1))
4593 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4594 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
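/* The last BD of each page does not describe a buffer; it chains to
 * the next page's DMA address, and the last page chains back to
 * page 0 so the pages form one circular ring.
 */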
4599 bnx2_init_rx_ring(struct bnx2 *bp)
4602 u16 prod, ring_prod;
4603 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4604 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4606 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4607 bp->rx_buf_use_size, bp->rx_max_ring);
4609 bnx2_init_rx_context0(bp);
4611 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4612 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4613 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4616 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4617 if (bp->rx_pg_ring_size) {
4618 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4619 bp->rx_pg_desc_mapping,
4620 PAGE_SIZE, bp->rx_max_pg_ring);
4621 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4622 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4623 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4624 BNX2_L2CTX_RBDC_JUMBO_KEY);
4626 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4627 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4629 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4630 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4632 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4633 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4636 val = (u64) bp->rx_desc_mapping[0] >> 32;
4637 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4639 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4640 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4642 ring_prod = prod = bnapi->rx_pg_prod;
4643 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4644 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4646 prod = NEXT_RX_BD(prod);
4647 ring_prod = RX_PG_RING_IDX(prod);
4649 bnapi->rx_pg_prod = prod;
4651 ring_prod = prod = bnapi->rx_prod;
4652 for (i = 0; i < bp->rx_ring_size; i++) {
4653 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4656 prod = NEXT_RX_BD(prod);
4657 ring_prod = RX_RING_IDX(prod);
4659 bnapi->rx_prod = prod;
4661 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4663 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4665 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
4668 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4670 u32 max, num_rings = 1;
4672 while (ring_size > MAX_RX_DESC_CNT) {
4673 ring_size -= MAX_RX_DESC_CNT;
4676 /* round to next power of 2 */
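/* Example, assuming 255 usable BDs per page: a requested ring_size
 * of 600 needs 3 pages, rounded up below to num_rings = 4 (the next
 * power of 2) and capped at max_size.
 */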
4678 while ((max & num_rings) == 0)
4681 if (num_rings != max)
4688 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4690 u32 rx_size, rx_space, jumbo_size;
4692 /* 8 for CRC and VLAN */
4693 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4695 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4696 sizeof(struct skb_shared_info);
4698 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4699 bp->rx_pg_ring_size = 0;
4700 bp->rx_max_pg_ring = 0;
4701 bp->rx_max_pg_ring_idx = 0;
4702 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4703 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4705 jumbo_size = size * pages;
4706 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4707 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4709 bp->rx_pg_ring_size = jumbo_size;
4710 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4712 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4713 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4714 bp->rx_copy_thresh = 0;
4717 bp->rx_buf_use_size = rx_size;
4719 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4720 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4721 bp->rx_ring_size = size;
4722 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4723 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
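/* When the full buffer would not fit in one page (jumbo MTU), the
 * head buffer is shrunk above to the copy threshold plus header
 * room, and the rest of each frame is received into PAGE_SIZE
 * fragments from the separate page ring sized here.
 */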
4727 bnx2_free_tx_skbs(struct bnx2 *bp)
4731 if (bp->tx_buf_ring == NULL)
4734 for (i = 0; i < TX_DESC_CNT; ) {
4735 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4736 struct sk_buff *skb = tx_buf->skb;
4744 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4745 skb_headlen(skb), PCI_DMA_TODEVICE);
4749 last = skb_shinfo(skb)->nr_frags;
4750 for (j = 0; j < last; j++) {
4751 tx_buf = &bp->tx_buf_ring[i + j + 1];
4752 pci_unmap_page(bp->pdev,
4753 pci_unmap_addr(tx_buf, mapping),
4754 skb_shinfo(skb)->frags[j].size,
4764 bnx2_free_rx_skbs(struct bnx2 *bp)
4768 if (bp->rx_buf_ring == NULL)
4771 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4772 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4773 struct sk_buff *skb = rx_buf->skb;
4778 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4779 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4785 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4786 bnx2_free_rx_page(bp, i);
4790 bnx2_free_skbs(struct bnx2 *bp)
4792 bnx2_free_tx_skbs(bp);
4793 bnx2_free_rx_skbs(bp);
4797 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4801 rc = bnx2_reset_chip(bp, reset_code);
4806 if ((rc = bnx2_init_chip(bp)) != 0)
4809 bnx2_clear_ring_states(bp);
4810 bnx2_init_tx_ring(bp);
4811 bnx2_init_rx_ring(bp);
4816 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
4820 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4823 spin_lock_bh(&bp->phy_lock);
4824 bnx2_init_phy(bp, reset_phy);
4826 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4827 bnx2_remote_phy_event(bp);
4828 spin_unlock_bh(&bp->phy_lock);
4833 bnx2_test_registers(struct bnx2 *bp)
4837 static const struct {
4840 #define BNX2_FL_NOT_5709 1
4844 { 0x006c, 0, 0x00000000, 0x0000003f },
4845 { 0x0090, 0, 0xffffffff, 0x00000000 },
4846 { 0x0094, 0, 0x00000000, 0x00000000 },
4848 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4849 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4850 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4851 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4852 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4853 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4854 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4855 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4856 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4858 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4859 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4860 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4861 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4862 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4863 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4865 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4866 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4867 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4869 { 0x1000, 0, 0x00000000, 0x00000001 },
4870 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
4872 { 0x1408, 0, 0x01c00800, 0x00000000 },
4873 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4874 { 0x14a8, 0, 0x00000000, 0x000001ff },
4875 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4876 { 0x14b0, 0, 0x00000002, 0x00000001 },
4877 { 0x14b8, 0, 0x00000000, 0x00000000 },
4878 { 0x14c0, 0, 0x00000000, 0x00000009 },
4879 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4880 { 0x14cc, 0, 0x00000000, 0x00000001 },
4881 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4883 { 0x1800, 0, 0x00000000, 0x00000001 },
4884 { 0x1804, 0, 0x00000000, 0x00000003 },
4886 { 0x2800, 0, 0x00000000, 0x00000001 },
4887 { 0x2804, 0, 0x00000000, 0x00003f01 },
4888 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4889 { 0x2810, 0, 0xffff0000, 0x00000000 },
4890 { 0x2814, 0, 0xffff0000, 0x00000000 },
4891 { 0x2818, 0, 0xffff0000, 0x00000000 },
4892 { 0x281c, 0, 0xffff0000, 0x00000000 },
4893 { 0x2834, 0, 0xffffffff, 0x00000000 },
4894 { 0x2840, 0, 0x00000000, 0xffffffff },
4895 { 0x2844, 0, 0x00000000, 0xffffffff },
4896 { 0x2848, 0, 0xffffffff, 0x00000000 },
4897 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4899 { 0x2c00, 0, 0x00000000, 0x00000011 },
4900 { 0x2c04, 0, 0x00000000, 0x00030007 },
4902 { 0x3c00, 0, 0x00000000, 0x00000001 },
4903 { 0x3c04, 0, 0x00000000, 0x00070000 },
4904 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4905 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4906 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4907 { 0x3c14, 0, 0x00000000, 0xffffffff },
4908 { 0x3c18, 0, 0x00000000, 0xffffffff },
4909 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4910 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4912 { 0x5004, 0, 0x00000000, 0x0000007f },
4913 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4915 { 0x5c00, 0, 0x00000000, 0x00000001 },
4916 { 0x5c04, 0, 0x00000000, 0x0003000f },
4917 { 0x5c08, 0, 0x00000003, 0x00000000 },
4918 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4919 { 0x5c10, 0, 0x00000000, 0xffffffff },
4920 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4921 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4922 { 0x5c88, 0, 0x00000000, 0x00077373 },
4923 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4925 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4926 { 0x680c, 0, 0xffffffff, 0x00000000 },
4927 { 0x6810, 0, 0xffffffff, 0x00000000 },
4928 { 0x6814, 0, 0xffffffff, 0x00000000 },
4929 { 0x6818, 0, 0xffffffff, 0x00000000 },
4930 { 0x681c, 0, 0xffffffff, 0x00000000 },
4931 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4932 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4933 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4934 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4935 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4936 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4937 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4938 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4939 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4940 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4941 { 0x684c, 0, 0xffffffff, 0x00000000 },
4942 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4943 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4944 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4945 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4946 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4947 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4958 u32 offset, rw_mask, ro_mask, save_val, val;
4959 u16 flags = reg_tbl[i].flags;
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;
4964 offset = (u32) reg_tbl[i].offset;
4965 rw_mask = reg_tbl[i].rw_mask;
4966 ro_mask = reg_tbl[i].ro_mask;
4968 save_val = readl(bp->regview + offset);
4970 writel(0, bp->regview + offset);
4972 val = readl(bp->regview + offset);
4973 if ((val & rw_mask) != 0) {
4977 if ((val & ro_mask) != (save_val & ro_mask)) {
4981 writel(0xffffffff, bp->regview + offset);
4983 val = readl(bp->regview + offset);
4984 if ((val & rw_mask) != rw_mask) {
4988 if ((val & ro_mask) != (save_val & ro_mask)) {
4992 writel(save_val, bp->regview + offset);
4996 writel(save_val, bp->regview + offset);
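/* Walk a memory region through the indirect register interface, writing
 * each test pattern and reading it back; any mismatch fails the test.
 */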
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
	int i;
5010 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5013 for (offset = 0; offset < size; offset += 4) {
5015 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
			if (bnx2_reg_rd_ind(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32   offset;
		u32   len;
	} mem_tbl_5706[] = {
5035 { 0x60000, 0x4000 },
5036 { 0xa0000, 0x3000 },
5037 { 0xe0000, 0x4000 },
5038 { 0x120000, 0x4000 },
5039 { 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
5045 { 0xa0000, 0x3000 },
5046 { 0xe0000, 0x4000 },
5047 { 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;
5058 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
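/* Loopback self-test: transmit one frame with the MAC or PHY looped back
 * and verify that it comes back intact on the receive ring.
 */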
5068 #define BNX2_MAC_LOOPBACK 0
5069 #define BNX2_PHY_LOOPBACK 1
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;

	tx_napi = bnapi;
5086 if (bp->flags & BNX2_FLAG_USING_MSIX)
5087 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5089 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5090 bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
5108 memcpy(packet, bp->dev->dev_addr, 6);
5109 memset(packet + 6, 0x0, 8);
5110 for (i = 14; i < pkt_size; i++)
5111 packet[i] = (unsigned char) (i & 0xff);
	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
5116 REG_WR(bp, BNX2_HC_COMMAND,
5117 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5128 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5129 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5130 txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5135 bp->tx_prod_bseq += pkt_size;
5137 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
5143 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
5152 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5153 goto loopback_test_done;
5155 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5156 if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}
5160 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5161 rx_skb = rx_buf->skb;
5163 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5164 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5166 pci_dma_sync_single_for_cpu(bp->pdev,
5167 pci_unmap_addr(rx_buf, mapping),
5168 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5170 if (rx_hdr->l2_fhdr_status &
5171 (L2_FHDR_ERRORS_BAD_CRC |
5172 L2_FHDR_ERRORS_PHY_DECODE |
5173 L2_FHDR_ERRORS_ALIGNMENT |
5174 L2_FHDR_ERRORS_TOO_SHORT |
5175 L2_FHDR_ERRORS_GIANT_FRAME)) {
		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}
5184 for (i = 14; i < pkt_size; i++) {
5185 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5197 #define BNX2_MAC_LOOPBACK_FAILED 1
5198 #define BNX2_PHY_LOOPBACK_FAILED 2
5199 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5200 BNX2_PHY_LOOPBACK_FAILED)
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
5221 #define NVRAM_SIZE 0x200
5222 #define CRC32_RESIDUAL 0xdebb20e3
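/* Running CRC32 over a block that ends with its own stored CRC yields
 * the constant residual 0xdebb20e3 when the data is intact.
 */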
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
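/* Link self-test: BMSR latches link-down events, so it is read twice to
 * get the current link state.
 */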
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
5312 /* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;
5349 spin_lock(&bp->phy_lock);
5350 if (bp->serdes_an_pending) {
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;
5358 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5360 if (bmcr & BMCR_ANENABLE) {
5361 if (bnx2_5706_serdes_has_link(bp)) {
5362 bmcr &= ~BMCR_ANENABLE;
5363 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5364 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5379 bmcr |= BMCR_ANENABLE;
5380 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5382 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5385 bp->current_interval = bp->timer_interval;
5390 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5391 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5392 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5394 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5395 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5396 bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
static void
bnx2_timer(unsigned long data)
{
5442 struct bnx2 *bp = (struct bnx2 *) data;
	if (!netif_running(bp->dev))
		return;
5447 if (atomic_read(&bp->intr_sem) != 0)
5448 goto bnx2_restart_timer;
5450 bnx2_send_heart_beat(bp);
5452 bp->stats_blk->stat_FwRxDrop =
5453 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5455 /* workaround occasional corrupted counters */
5456 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5457 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5458 BNX2_HC_COMMAND_STATS_NOW);
5460 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5461 if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
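/* Request one IRQ per vector; the line is shared only when running in
 * legacy INTx mode.
 */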
static int
bnx2_request_irq(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 dev);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}
static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, dev);
		irq->requested = 0;
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
static void
bnx2_enable_msix(struct bnx2 *bp)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
	bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;

	strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
	strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
	strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
	strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");

	bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
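/* Pick the interrupt mode: default to INTx, then upgrade to MSI-X or MSI
 * when the capability flags allow it and MSI has not been disabled.
 */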
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}
}
5576 /* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;
5583 netif_carrier_off(dev);
5585 bnx2_set_power_state(bp, PCI_D0);
5586 bnx2_disable_int(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;
5592 bnx2_setup_int_mode(bp, disable_msi);
5593 bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp, 1);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}
5612 mod_timer(&bp->timer, jiffies + bp->current_interval);
5614 atomic_set(&bp->intr_sem, 0);
5616 bnx2_enable_int(bp);
5618 if (bp->flags & BNX2_FLAG_USING_MSI) {
5619 /* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
5622 if (bnx2_test_intr(bp) != 0) {
5623 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5624 " using MSI, switching to INTx mode. Please"
5625 " report this failure to the PCI maintainer"
5626 " and include system chipset information.\n",
5629 bnx2_disable_int(bp);
5632 bnx2_setup_int_mode(bp, 1);
5634 rc = bnx2_init_nic(bp, 0);
5637 rc = bnx2_request_irq(bp);
			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
5649 if (bp->flags & BNX2_FLAG_USING_MSI)
5650 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5651 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5652 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
	netif_start_queue(dev);

	return 0;
}
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5702 /* Called with netif_tx_lock.
5703 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
5718 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5719 (skb_shinfo(skb)->nr_frags + 1))) {
5720 netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;
5743 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5745 tcp_opt_len = tcp_optlen(skb);
5747 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5748 u32 tcp_off = skb_transport_offset(skb) -
5749 sizeof(struct ipv6hdr) - ETH_HLEN;
5751 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5752 TX_BD_FLAGS_SW_FLAGS;
5753 if (likely(tcp_off == 0))
5754 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5757 vlan_tag_flags |= ((tcp_off & 0x3) <<
5758 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5759 ((tcp_off & 0x10) <<
5760 TX_BD_FLAGS_TCP6_OFF4_SHL);
5761 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5764 if (skb_header_cloned(skb) &&
5765 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5767 return NETDEV_TX_OK;
5770 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5774 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
5779 if (tcp_opt_len || (iph->ihl > 5)) {
5780 vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
5791 pci_unmap_addr_set(tx_buf, mapping, mapping);
5793 txbd = &bp->tx_desc_ring[ring_prod];
5795 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5796 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5797 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5798 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
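	/* The first BD carries the linear data and TX_BD_FLAGS_START; each
	 * page fragment below gets its own BD, and the last BD is marked
	 * with TX_BD_FLAGS_END.
	 */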
5800 last_frag = skb_shinfo(skb)->nr_frags;
5802 for (i = 0; i < last_frag; i++) {
5803 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5805 prod = NEXT_TX_BD(prod);
5806 ring_prod = TX_RING_IDX(prod);
5807 txbd = &bp->tx_desc_ring[ring_prod];
5810 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5811 len, PCI_DMA_TODEVICE);
5812 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5815 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5816 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5817 txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5823 prod = NEXT_TX_BD(prod);
5824 bp->tx_prod_bseq += skb->len;
5826 REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;
5834 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5835 netif_stop_queue(dev);
5836 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5843 /* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5876 #define GET_NET_STATS64(ctr) \
5877 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5878 (unsigned long) (ctr##_lo)
#define GET_NET_STATS32(ctr) \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
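/* Hardware counters are kept as hi/lo u32 pairs; on 64-bit hosts both
 * halves are combined, while 32-bit hosts report only the low word.
 */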
5889 static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
5892 struct bnx2 *bp = netdev_priv(dev);
5893 struct statistics_block *stats_blk = bp->stats_blk;
5894 struct net_device_stats *net_stats = &bp->net_stats;
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
5899 net_stats->rx_packets =
5900 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5901 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5902 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5904 net_stats->tx_packets =
5905 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5906 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5907 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5909 net_stats->rx_bytes =
5910 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5912 net_stats->tx_bytes =
5913 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5915 net_stats->multicast =
5916 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5918 net_stats->collisions =
5919 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5921 net_stats->rx_length_errors =
5922 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5923 stats_blk->stat_EtherStatsOverrsizePkts);
5925 net_stats->rx_over_errors =
5926 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5928 net_stats->rx_frame_errors =
5929 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5931 net_stats->rx_crc_errors =
5932 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5934 net_stats->rx_errors = net_stats->rx_length_errors +
5935 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5936 net_stats->rx_crc_errors;
5938 net_stats->tx_aborted_errors =
5939 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5940 stats_blk->stat_Dot3StatsLateCollisions);
5942 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5943 (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
5956 net_stats->tx_carrier_errors;
5958 net_stats->rx_missed_errors =
5959 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5965 /* All ethtool functions called with rtnl_lock */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
5970 struct bnx2 *bp = netdev_priv(dev);
5971 int support_serdes = 0, support_copper = 0;
5973 cmd->supported = SUPPORTED_Autoneg;
5974 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5977 } else if (bp->phy_port == PORT_FIBRE)
5982 if (support_serdes) {
5983 cmd->supported |= SUPPORTED_1000baseT_Full |
5985 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
5989 if (support_copper) {
5990 cmd->supported |= SUPPORTED_10baseT_Half |
5991 SUPPORTED_10baseT_Full |
5992 SUPPORTED_100baseT_Half |
5993 SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}
5999 spin_lock_bh(&bp->phy_lock);
6000 cmd->port = bp->phy_port;
6001 cmd->advertising = bp->advertising;
6003 if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}
6010 if (netif_carrier_ok(dev)) {
6011 cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
6018 spin_unlock_bh(&bp->phy_lock);
6020 cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
6029 struct bnx2 *bp = netdev_priv(dev);
6030 u8 autoneg = bp->autoneg;
6031 u8 req_duplex = bp->req_duplex;
6032 u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);
6038 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6039 goto err_out_unlock;
6041 if (cmd->port != bp->phy_port &&
6042 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6043 goto err_out_unlock;
6045 if (cmd->autoneg == AUTONEG_ENABLE) {
6046 autoneg |= AUTONEG_SPEED;
6048 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6050 /* allow advertising 1 speed */
6051 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6052 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6053 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6054 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6056 if (cmd->port == PORT_FIBRE)
6057 goto err_out_unlock;
6059 advertising = cmd->advertising;
6061 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6062 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6063 (cmd->port == PORT_TP))
6064 goto err_out_unlock;
6065 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6066 advertising = cmd->advertising;
6067 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	} else {
6078 if (cmd->port == PORT_FIBRE) {
6079 if ((cmd->speed != SPEED_1000 &&
6080 cmd->speed != SPEED_2500) ||
6081 (cmd->duplex != DUPLEX_FULL))
6082 goto err_out_unlock;
6084 if (cmd->speed == SPEED_2500 &&
6085 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6089 goto err_out_unlock;
6091 autoneg &= ~AUTONEG_SPEED;
6092 req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
6098 bp->advertising = advertising;
6099 bp->req_line_speed = req_line_speed;
6100 bp->req_duplex = req_duplex;
	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
6113 struct bnx2 *bp = netdev_priv(dev);
6115 strcpy(info->driver, DRV_MODULE_NAME);
6116 strcpy(info->version, DRV_MODULE_VERSION);
6117 strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
6121 #define BNX2_REGDUMP_LEN (32 * 1024)
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
6135 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6136 0x0800, 0x0880, 0x0c00, 0x0c10,
6137 0x0c30, 0x0d08, 0x1000, 0x101c,
6138 0x1040, 0x1048, 0x1080, 0x10a4,
6139 0x1400, 0x1490, 0x1498, 0x14f0,
6140 0x1500, 0x155c, 0x1580, 0x15dc,
6141 0x1600, 0x1658, 0x1680, 0x16d8,
6142 0x1800, 0x1820, 0x1840, 0x1854,
6143 0x1880, 0x1894, 0x1900, 0x1984,
6144 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6145 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6146 0x2000, 0x2030, 0x23c0, 0x2400,
6147 0x2800, 0x2820, 0x2830, 0x2850,
6148 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6149 0x3c00, 0x3c94, 0x4000, 0x4010,
6150 0x4080, 0x4090, 0x43c0, 0x4458,
6151 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6152 0x4fc0, 0x5010, 0x53c0, 0x5444,
6153 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6154 0x5fc0, 0x6000, 0x6400, 0x6428,
6155 0x6800, 0x6848, 0x684c, 0x6860,
6156 0x6888, 0x6910, 0x8000 };
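	/* Consecutive pairs above delimit readable register ranges; the
	 * gaps between pairs are skipped and left zeroed in the dump.
	 */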
	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset / 4;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
6171 if (offset == reg_boundaries[i + 1]) {
6172 offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & BNX2_FLAG_NO_WOL) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & BNX2_FLAG_NO_WOL)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
6325 struct bnx2 *bp = netdev_priv(dev);
6327 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6328 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6330 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6331 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6333 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6334 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6336 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6337 if (bp->rx_quick_cons_trip_int > 0xff)
6338 bp->rx_quick_cons_trip_int = 0xff;
6340 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6341 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6343 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6344 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6346 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6347 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6349 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;
6353 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6354 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6355 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
6358 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6359 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6360 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6362 if (netif_running(bp->dev)) {
6363 bnx2_netif_stop(bp);
6364 bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
	return rc;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}
6457 spin_lock_bh(&bp->phy_lock);
6459 bnx2_setup_phy(bp, bp->phy_port);
6461 spin_unlock_bh(&bp->phy_lock);
6467 bnx2_get_rx_csum(struct net_device *dev)
6469 struct bnx2 *bp = netdev_priv(dev);
6475 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6477 struct bnx2 *bp = netdev_priv(dev);
6484 bnx2_set_tso(struct net_device *dev, u32 data)
6486 struct bnx2 *bp = netdev_priv(dev);
6489 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6490 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6491 dev->features |= NETIF_F_TSO6;
6493 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6498 #define BNX2_NUM_STATS 46
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
6507 { "rx_ucast_packets" },
6508 { "rx_mcast_packets" },
6509 { "rx_bcast_packets" },
6510 { "tx_ucast_packets" },
6511 { "tx_mcast_packets" },
6512 { "tx_bcast_packets" },
6513 { "tx_mac_errors" },
6514 { "tx_carrier_errors" },
6515 { "rx_crc_errors" },
6516 { "rx_align_errors" },
6517 { "tx_single_collisions" },
6518 { "tx_multi_collisions" },
6520 { "tx_excess_collisions" },
6521 { "tx_late_collisions" },
6522 { "tx_total_collisions" },
6525 { "rx_undersize_packets" },
6526 { "rx_oversize_packets" },
6527 { "rx_64_byte_packets" },
6528 { "rx_65_to_127_byte_packets" },
6529 { "rx_128_to_255_byte_packets" },
6530 { "rx_256_to_511_byte_packets" },
6531 { "rx_512_to_1023_byte_packets" },
6532 { "rx_1024_to_1522_byte_packets" },
6533 { "rx_1523_to_9022_byte_packets" },
6534 { "tx_64_byte_packets" },
6535 { "tx_65_to_127_byte_packets" },
6536 { "tx_128_to_255_byte_packets" },
6537 { "tx_256_to_511_byte_packets" },
6538 { "tx_512_to_1023_byte_packets" },
6539 { "tx_1024_to_1522_byte_packets" },
6540 { "tx_1523_to_9022_byte_packets" },
6541 { "rx_xon_frames" },
6542 { "rx_xoff_frames" },
6543 { "tx_xon_frames" },
6544 { "tx_xoff_frames" },
6545 { "rx_mac_ctrl_frames" },
6546 { "rx_filtered_packets" },
6548 { "rx_fw_discards" },
6551 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
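/* The stats block is addressed as an array of u32 words, so offsets are
 * expressed in 32-bit words rather than bytes.
 */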
6553 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6554 STATS_OFFSET32(stat_IfHCInOctets_hi),
6555 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6556 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6557 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6558 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6559 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6560 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6561 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6562 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6563 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6564 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6565 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6566 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6567 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6568 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6569 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6570 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6571 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6572 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6573 STATS_OFFSET32(stat_EtherStatsCollisions),
6574 STATS_OFFSET32(stat_EtherStatsFragments),
6575 STATS_OFFSET32(stat_EtherStatsJabbers),
6576 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6577 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6578 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6579 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6580 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6581 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6582 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6583 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6584 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6585 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6586 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6587 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6588 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6589 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6590 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6591 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6592 STATS_OFFSET32(stat_XonPauseFramesReceived),
6593 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6594 STATS_OFFSET32(stat_OutXonSent),
6595 STATS_OFFSET32(stat_OutXoffSent),
6596 STATS_OFFSET32(stat_MacControlFramesReceived),
6597 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6598 STATS_OFFSET32(stat_IfInMBUFDiscards),
6599 STATS_OFFSET32(stat_FwRxDrop),
6602 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
6605 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6606 8,0,8,8,8,8,8,8,8,8,
6607 4,0,4,4,4,4,4,4,4,4,
6608 4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6613 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6614 8,0,8,8,8,8,8,8,8,8,
6615 4,4,4,4,4,4,4,4,4,4,
6616 4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6621 #define BNX2_NUM_TESTS 6
static struct {
	char string[ETH_GSTRING_LEN];
6625 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6626 { "register_test (offline)" },
6627 { "memory_test (offline)" },
6628 { "loopback_test (offline)" },
6629 { "nvram_test (online)" },
6630 { "interrupt_test (online)" },
6631 { "link_test (online)" },
6635 bnx2_get_sset_count(struct net_device *dev, int sset)
6639 return BNX2_NUM_TESTS;
6641 return BNX2_NUM_STATS;
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}

		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
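/* Blink the port LED for identification by overriding the LED mode in
 * BNX2_MISC_CFG, restoring the saved configuration when done.
 */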
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_ipv6_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}
6804 static const struct ethtool_ops bnx2_ethtool_ops = {
6805 .get_settings = bnx2_get_settings,
6806 .set_settings = bnx2_set_settings,
6807 .get_drvinfo = bnx2_get_drvinfo,
6808 .get_regs_len = bnx2_get_regs_len,
6809 .get_regs = bnx2_get_regs,
6810 .get_wol = bnx2_get_wol,
6811 .set_wol = bnx2_set_wol,
6812 .nway_reset = bnx2_nway_reset,
6813 .get_link = ethtool_op_get_link,
6814 .get_eeprom_len = bnx2_get_eeprom_len,
6815 .get_eeprom = bnx2_get_eeprom,
6816 .set_eeprom = bnx2_set_eeprom,
6817 .get_coalesce = bnx2_get_coalesce,
6818 .set_coalesce = bnx2_set_coalesce,
6819 .get_ringparam = bnx2_get_ringparam,
6820 .set_ringparam = bnx2_set_ringparam,
6821 .get_pauseparam = bnx2_get_pauseparam,
6822 .set_pauseparam = bnx2_set_pauseparam,
6823 .get_rx_csum = bnx2_get_rx_csum,
6824 .set_rx_csum = bnx2_set_rx_csum,
6825 .set_tx_csum = bnx2_set_tx_csum,
6826 .set_sg = ethtool_op_set_sg,
6827 .set_tso = bnx2_set_tso,
6828 .self_test = bnx2_self_test,
6829 .get_strings = bnx2_get_strings,
6830 .phys_id = bnx2_phys_id,
6831 .get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6835 /* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6889 /* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6932 static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
6970 static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}
7022 static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;
7031 SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);
	bp->flags = 0;
	bp->phy_flags = 0;
7037 /* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}
	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}
7057 pci_set_master(pdev);
7058 pci_save_state(pdev);
7060 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
7072 spin_lock_init(&bp->indirect_lock);
7073 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7075 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7076 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7077 dev->mem_end = dev->mem_start + mem_len;
7078 dev->irq = pdev->irq;
	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
7092 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7093 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7094 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7096 bnx2_set_power_state(bp, PCI_D0);
7098 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7100 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
7108 if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}
7120 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7121 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7122 bp->flags |= BNX2_FLAG_MSIX_CAP;
7125 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7126 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7127 bp->flags |= BNX2_FLAG_MSI_CAP;
7130 /* 5708 cannot support DMA addresses > 40-bit. */
7131 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7132 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7134 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7136 /* Configure DMA attributes. */
7137 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7138 dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}
7150 if (!(bp->flags & BNX2_FLAG_PCIE))
7151 bnx2_get_pci_speed(bp);
7153 /* 5706A0 may falsely detect SERR and PERR. */
7154 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7155 reg = REG_RD(bp, PCI_COMMAND);
7156 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7157 REG_WR(bp, PCI_COMMAND, reg);
7159 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7160 !(bp->flags & BNX2_FLAG_PCIX)) {
7163 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7167 bnx2_init_nvram(bp);
7169 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7171 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7172 BNX2_SHM_HDR_SIGNATURE_SIG) {
7173 u32 off = PCI_FUNC(pdev->devfn) << 2;
		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7179 /* Get the permanent MAC address. First we need to make sure the
7180 * firmware is actually running.
7182 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7184 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7185 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7186 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7191 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
7196 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7197 if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
7205 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7206 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7209 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7210 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7212 for (i = 0; i < 30; i++) {
7213 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7214 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7219 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7220 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7221 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7222 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7224 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7226 bp->fw_version[j++] = ' ';
7227 for (i = 0; i < 3; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}
7235 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7236 bp->mac_addr[0] = (u8) (reg >> 8);
7237 bp->mac_addr[1] = (u8) reg;
7239 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7240 bp->mac_addr[2] = (u8) (reg >> 24);
7241 bp->mac_addr[3] = (u8) (reg >> 16);
7242 bp->mac_addr[4] = (u8) (reg >> 8);
7243 bp->mac_addr[5] = (u8) reg;
7245 bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;
7250 bp->tx_quick_cons_trip_int = 20;
7251 bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;
7255 bp->rx_quick_cons_trip_int = 6;
7256 bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;
7260 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7262 bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;
7267 /* Disable WOL support if we are running on a SERDES chip. */
7268 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7269 bnx2_get_5709_media(bp);
7270 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7271 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7273 bp->phy_port = PORT_TP;
7274 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7275 bp->phy_port = PORT_FIBRE;
7276 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7277 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
7281 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7282 /* Don't do parallel detect on this board because of
7283 * some board problems. The link will not go down
7284 * if we do parallel detect.
7286 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7287 pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
7294 bnx2_init_remote_phy(bp);
7296 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7297 CHIP_NUM(bp) == CHIP_NUM_5708)
7298 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7299 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7300 (CHIP_REV(bp) == CHIP_REV_Ax ||
7301 CHIP_REV(bp) == CHIP_REV_Bx))
7302 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7304 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7305 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7306 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}
7311 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7312 bp->tx_quick_cons_trip_int =
7313 bp->tx_quick_cons_trip;
7314 bp->tx_ticks_int = bp->tx_ticks;
7315 bp->rx_quick_cons_trip_int =
7316 bp->rx_quick_cons_trip;
7317 bp->rx_ticks_int = bp->rx_ticks;
7318 bp->comp_prod_trip_int = bp->comp_prod_trip;
7319 bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
7323 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7325 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7326 * with byte enables disabled on the unused 32-bit word. This is legal
7327 * but causes problems on the AMD 8132 which will eventually stop
7328 * responding after a while.
7330 * AMD believes this incompatibility is unique to the 5706, and
7331 * prefers to locally disable MSI rather than globally disabling it.
7333 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7334 struct pci_dev *amd_8132 = NULL;
7336 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
7349 bnx2_set_default_link(bp);
7350 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7352 init_timer(&bp->timer);
7353 bp->timer.expires = RUN_AT(bp->timer_interval);
7354 bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
7376 static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
7396 static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		bnapi->bp = bp;
	}
	netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
	netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
		       64);
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;
	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}
	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
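/* The probe printk above produces a boot-log line of roughly this form
 * (values illustrative; 00:10:18 is a Broadcom OUI):
 *
 *   eth0: Broadcom NetXtreme II BCM5708 1000Base-T (B2) PCI-X 64-bit
 *   133MHz found at mem f8000000, IRQ 16, node addr 00:10:18:xx:xx:xx
 */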
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
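/* AER recovery sequence: the PCI error core calls .error_detected first
 * (the driver quiesces and requests a slot reset), then .slot_reset
 * after the link has been reset (re-enable and re-initialize the
 * device), and finally .resume once traffic may flow again.
 */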
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
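/* Typical usage (illustrative): the driver binds automatically to the
 * PCI IDs listed in bnx2_pci_tbl when loaded, e.g.:
 *
 *   modprobe bnx2
 *   modprobe bnx2 disable_msi=1    # work around broken MSI setups
 */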