1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
 */
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
#endif
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x10000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.0"
60 #define DRV_MODULE_RELDATE "December 11, 2007"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
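/* Return the number of tx descriptors still available to the driver,
 * allowing for wrap-around of the producer/consumer indices.
 */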
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
 */
238 diff = bp->tx_prod - bnapi->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
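/* Indirectly read a device register through the PCICFG register window,
 * serialized by indirect_lock.
 */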
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
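/* Write a value into on-chip context memory.  The 5709 uses the CTX_CTRL
 * interface and polls for the write request to complete; older chips write
 * through CTX_DATA_ADR/CTX_DATA directly.
 */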
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
290 spin_unlock_bh(&bp->indirect_lock);
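/* Read a PHY register over the MDIO interface, temporarily turning off
 * auto-polling while the access is in progress.
 */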
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
314 for (i = 0; i < 50; i++) {
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
371 for (i = 0; i < 50; i++) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400 bnx2_disable_int(struct bnx2 *bp)
403 struct bnx2_napi *bnapi;
405 for (i = 0; i < bp->irq_nvecs; i++) {
406 bnapi = &bp->bnx2_napi[i];
407 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
408 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
410 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
414 bnx2_enable_int(struct bnx2 *bp)
417 struct bnx2_napi *bnapi;
419 for (i = 0; i < bp->irq_nvecs; i++) {
420 bnapi = &bp->bnx2_napi[i];
422 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
423 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
424 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
425 bnapi->last_status_idx);
427 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
428 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
429 bnapi->last_status_idx);
431 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
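/* Disable interrupts on all vectors and wait for any handlers that are
 * already running to complete.
 */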
435 bnx2_disable_int_sync(struct bnx2 *bp)
439 atomic_inc(&bp->intr_sem);
440 bnx2_disable_int(bp);
441 for (i = 0; i < bp->irq_nvecs; i++)
442 synchronize_irq(bp->irq_tbl[i].vector);
446 bnx2_napi_disable(struct bnx2 *bp)
450 for (i = 0; i < bp->irq_nvecs; i++)
451 napi_disable(&bp->bnx2_napi[i].napi);
455 bnx2_napi_enable(struct bnx2 *bp)
459 for (i = 0; i < bp->irq_nvecs; i++)
460 napi_enable(&bp->bnx2_napi[i].napi);
464 bnx2_netif_stop(struct bnx2 *bp)
466 bnx2_disable_int_sync(bp);
467 if (netif_running(bp->dev)) {
468 bnx2_napi_disable(bp);
469 netif_tx_disable(bp->dev);
470 bp->dev->trans_start = jiffies; /* prevent tx timeout */
475 bnx2_netif_start(struct bnx2 *bp)
477 if (atomic_dec_and_test(&bp->intr_sem)) {
478 if (netif_running(bp->dev)) {
479 netif_wake_queue(bp->dev);
480 bnx2_napi_enable(bp);
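/* Free the DMA-coherent descriptor rings, the status/statistics block and
 * the software ring bookkeeping arrays.
 */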
487 bnx2_free_mem(struct bnx2 *bp)
491 for (i = 0; i < bp->ctx_pages; i++) {
492 if (bp->ctx_blk[i]) {
493 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
495 bp->ctx_blk_mapping[i]);
496 bp->ctx_blk[i] = NULL;
499 if (bp->status_blk) {
500 pci_free_consistent(bp->pdev, bp->status_stats_size,
501 bp->status_blk, bp->status_blk_mapping);
502 bp->status_blk = NULL;
503 bp->stats_blk = NULL;
505 if (bp->tx_desc_ring) {
506 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507 bp->tx_desc_ring, bp->tx_desc_mapping);
508 bp->tx_desc_ring = NULL;
510 kfree(bp->tx_buf_ring);
511 bp->tx_buf_ring = NULL;
512 for (i = 0; i < bp->rx_max_ring; i++) {
513 if (bp->rx_desc_ring[i])
514 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
516 bp->rx_desc_mapping[i]);
517 bp->rx_desc_ring[i] = NULL;
519 vfree(bp->rx_buf_ring);
520 bp->rx_buf_ring = NULL;
521 for (i = 0; i < bp->rx_max_pg_ring; i++) {
522 if (bp->rx_pg_desc_ring[i])
523 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524 bp->rx_pg_desc_ring[i],
525 bp->rx_pg_desc_mapping[i]);
526 bp->rx_pg_desc_ring[i] = NULL;
529 vfree(bp->rx_pg_ring);
530 bp->rx_pg_ring = NULL;
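/* Allocate the tx/rx descriptor rings, software ring arrays, the combined
 * status + statistics block and, on the 5709, the context memory pages.
 */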
534 bnx2_alloc_mem(struct bnx2 *bp)
536 int i, status_blk_size;
538 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
539 if (bp->tx_buf_ring == NULL)
542 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
543 &bp->tx_desc_mapping);
544 if (bp->tx_desc_ring == NULL)
547 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
548 if (bp->rx_buf_ring == NULL)
551 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
553 for (i = 0; i < bp->rx_max_ring; i++) {
554 bp->rx_desc_ring[i] =
555 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
556 &bp->rx_desc_mapping[i]);
557 if (bp->rx_desc_ring[i] == NULL)
562 if (bp->rx_pg_ring_size) {
563 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
565 if (bp->rx_pg_ring == NULL)
568 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
572 for (i = 0; i < bp->rx_max_pg_ring; i++) {
573 bp->rx_pg_desc_ring[i] =
574 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
575 &bp->rx_pg_desc_mapping[i]);
576 if (bp->rx_pg_desc_ring[i] == NULL)
581 /* Combine status and statistics blocks into one allocation. */
582 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
583 if (bp->flags & MSIX_CAP_FLAG)
584 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
585 BNX2_SBLK_MSIX_ALIGN_SIZE);
586 bp->status_stats_size = status_blk_size +
587 sizeof(struct statistics_block);
589 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
590 &bp->status_blk_mapping);
591 if (bp->status_blk == NULL)
594 memset(bp->status_blk, 0, bp->status_stats_size);
596 bp->bnx2_napi[0].status_blk = bp->status_blk;
597 if (bp->flags & MSIX_CAP_FLAG) {
598 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
599 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
601 bnapi->status_blk = (void *)
602 ((unsigned long) bp->status_blk +
603 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
604 bnapi->int_num = i << 24;
608 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
611 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
613 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
614 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
615 if (bp->ctx_pages == 0)
617 for (i = 0; i < bp->ctx_pages; i++) {
618 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
620 &bp->ctx_blk_mapping[i]);
621 if (bp->ctx_blk[i] == NULL)
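/* Report the current speed, duplex and autoneg state to the bootcode via
 * the shared memory link status word (skipped when the firmware owns the
 * PHY).
 */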
633 bnx2_report_fw_link(struct bnx2 *bp)
635 u32 fw_link_status = 0;
637 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
643 switch (bp->line_speed) {
645 if (bp->duplex == DUPLEX_HALF)
646 fw_link_status = BNX2_LINK_STATUS_10HALF;
648 fw_link_status = BNX2_LINK_STATUS_10FULL;
651 if (bp->duplex == DUPLEX_HALF)
652 fw_link_status = BNX2_LINK_STATUS_100HALF;
654 fw_link_status = BNX2_LINK_STATUS_100FULL;
657 if (bp->duplex == DUPLEX_HALF)
658 fw_link_status = BNX2_LINK_STATUS_1000HALF;
660 fw_link_status = BNX2_LINK_STATUS_1000FULL;
663 if (bp->duplex == DUPLEX_HALF)
664 fw_link_status = BNX2_LINK_STATUS_2500HALF;
666 fw_link_status = BNX2_LINK_STATUS_2500FULL;
670 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
673 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
675 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
676 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
678 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
679 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
680 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
682 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
686 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
688 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
692 bnx2_xceiver_str(struct bnx2 *bp)
694 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
700 bnx2_report_link(struct bnx2 *bp)
703 netif_carrier_on(bp->dev);
704 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
705 bnx2_xceiver_str(bp));
707 printk("%d Mbps ", bp->line_speed);
709 if (bp->duplex == DUPLEX_FULL)
710 printk("full duplex");
712 printk("half duplex");
715 if (bp->flow_ctrl & FLOW_CTRL_RX) {
716 printk(", receive ");
717 if (bp->flow_ctrl & FLOW_CTRL_TX)
718 printk("& transmit ");
721 printk(", transmit ");
723 printk("flow control ON");
728 netif_carrier_off(bp->dev);
729 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
730 bnx2_xceiver_str(bp));
733 bnx2_report_fw_link(bp);
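/* Resolve the tx/rx pause settings from the local and remote pause
 * advertisements once the link is up.
 */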
737 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
739 u32 local_adv, remote_adv;
742 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
743 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
745 if (bp->duplex == DUPLEX_FULL) {
746 bp->flow_ctrl = bp->req_flow_ctrl;
751 if (bp->duplex != DUPLEX_FULL) {
755 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
756 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
759 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
760 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
761 bp->flow_ctrl |= FLOW_CTRL_TX;
762 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
763 bp->flow_ctrl |= FLOW_CTRL_RX;
767 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
768 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
770 if (bp->phy_flags & PHY_SERDES_FLAG) {
771 u32 new_local_adv = 0;
772 u32 new_remote_adv = 0;
774 if (local_adv & ADVERTISE_1000XPAUSE)
775 new_local_adv |= ADVERTISE_PAUSE_CAP;
776 if (local_adv & ADVERTISE_1000XPSE_ASYM)
777 new_local_adv |= ADVERTISE_PAUSE_ASYM;
778 if (remote_adv & ADVERTISE_1000XPAUSE)
779 new_remote_adv |= ADVERTISE_PAUSE_CAP;
780 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
781 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
783 local_adv = new_local_adv;
784 remote_adv = new_remote_adv;
787 /* See Table 28B-3 of 802.3ab-1999 spec. */
788 if (local_adv & ADVERTISE_PAUSE_CAP) {
789 if(local_adv & ADVERTISE_PAUSE_ASYM) {
790 if (remote_adv & ADVERTISE_PAUSE_CAP) {
791 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
793 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
794 bp->flow_ctrl = FLOW_CTRL_RX;
798 if (remote_adv & ADVERTISE_PAUSE_CAP) {
799 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
803 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
804 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
805 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
807 bp->flow_ctrl = FLOW_CTRL_TX;
813 bnx2_5709s_linkup(struct bnx2 *bp)
819 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
820 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
821 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
823 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
824 bp->line_speed = bp->req_line_speed;
825 bp->duplex = bp->req_duplex;
828 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
830 case MII_BNX2_GP_TOP_AN_SPEED_10:
831 bp->line_speed = SPEED_10;
833 case MII_BNX2_GP_TOP_AN_SPEED_100:
834 bp->line_speed = SPEED_100;
836 case MII_BNX2_GP_TOP_AN_SPEED_1G:
837 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
838 bp->line_speed = SPEED_1000;
840 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
841 bp->line_speed = SPEED_2500;
844 if (val & MII_BNX2_GP_TOP_AN_FD)
845 bp->duplex = DUPLEX_FULL;
847 bp->duplex = DUPLEX_HALF;
852 bnx2_5708s_linkup(struct bnx2 *bp)
857 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859 case BCM5708S_1000X_STAT1_SPEED_10:
860 bp->line_speed = SPEED_10;
862 case BCM5708S_1000X_STAT1_SPEED_100:
863 bp->line_speed = SPEED_100;
865 case BCM5708S_1000X_STAT1_SPEED_1G:
866 bp->line_speed = SPEED_1000;
868 case BCM5708S_1000X_STAT1_SPEED_2G5:
869 bp->line_speed = SPEED_2500;
872 if (val & BCM5708S_1000X_STAT1_FD)
873 bp->duplex = DUPLEX_FULL;
875 bp->duplex = DUPLEX_HALF;
881 bnx2_5706s_linkup(struct bnx2 *bp)
883 u32 bmcr, local_adv, remote_adv, common;
886 bp->line_speed = SPEED_1000;
888 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889 if (bmcr & BMCR_FULLDPLX) {
890 bp->duplex = DUPLEX_FULL;
893 bp->duplex = DUPLEX_HALF;
896 if (!(bmcr & BMCR_ANENABLE)) {
900 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
903 common = local_adv & remote_adv;
904 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
906 if (common & ADVERTISE_1000XFULL) {
907 bp->duplex = DUPLEX_FULL;
910 bp->duplex = DUPLEX_HALF;
918 bnx2_copper_linkup(struct bnx2 *bp)
922 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
923 if (bmcr & BMCR_ANENABLE) {
924 u32 local_adv, remote_adv, common;
926 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
927 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
929 common = local_adv & (remote_adv >> 2);
930 if (common & ADVERTISE_1000FULL) {
931 bp->line_speed = SPEED_1000;
932 bp->duplex = DUPLEX_FULL;
934 else if (common & ADVERTISE_1000HALF) {
935 bp->line_speed = SPEED_1000;
936 bp->duplex = DUPLEX_HALF;
939 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
940 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
942 common = local_adv & remote_adv;
943 if (common & ADVERTISE_100FULL) {
944 bp->line_speed = SPEED_100;
945 bp->duplex = DUPLEX_FULL;
947 else if (common & ADVERTISE_100HALF) {
948 bp->line_speed = SPEED_100;
949 bp->duplex = DUPLEX_HALF;
951 else if (common & ADVERTISE_10FULL) {
952 bp->line_speed = SPEED_10;
953 bp->duplex = DUPLEX_FULL;
955 else if (common & ADVERTISE_10HALF) {
956 bp->line_speed = SPEED_10;
957 bp->duplex = DUPLEX_HALF;
966 if (bmcr & BMCR_SPEED100) {
967 bp->line_speed = SPEED_100;
970 bp->line_speed = SPEED_10;
972 if (bmcr & BMCR_FULLDPLX) {
973 bp->duplex = DUPLEX_FULL;
976 bp->duplex = DUPLEX_HALF;
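/* Program the EMAC port mode, duplex and rx/tx flow control to match the
 * resolved link parameters.
 */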
984 bnx2_set_mac_link(struct bnx2 *bp)
988 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
989 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
990 (bp->duplex == DUPLEX_HALF)) {
991 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
994 /* Configure the EMAC mode register. */
995 val = REG_RD(bp, BNX2_EMAC_MODE);
997 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
998 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
999 BNX2_EMAC_MODE_25G_MODE);
1002 switch (bp->line_speed) {
1004 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1005 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1010 val |= BNX2_EMAC_MODE_PORT_MII;
1013 val |= BNX2_EMAC_MODE_25G_MODE;
1016 val |= BNX2_EMAC_MODE_PORT_GMII;
1021 val |= BNX2_EMAC_MODE_PORT_GMII;
1024 /* Set the MAC to operate in the appropriate duplex mode. */
1025 if (bp->duplex == DUPLEX_HALF)
1026 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1027 REG_WR(bp, BNX2_EMAC_MODE, val);
1029 /* Enable/disable rx PAUSE. */
1030 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1032 if (bp->flow_ctrl & FLOW_CTRL_RX)
1033 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1034 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1036 /* Enable/disable tx PAUSE. */
1037 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1038 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1040 if (bp->flow_ctrl & FLOW_CTRL_TX)
1041 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1042 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1044 /* Acknowledge the interrupt. */
1045 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1053 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1054 (CHIP_NUM(bp) == CHIP_NUM_5709))
1055 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056 MII_BNX2_BLK_ADDR_GP_STATUS);
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1062 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1063 (CHIP_NUM(bp) == CHIP_NUM_5709))
1064 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1069 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1074 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1077 if (bp->autoneg & AUTONEG_SPEED)
1078 bp->advertising |= ADVERTISED_2500baseX_Full;
1080 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1081 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1083 bnx2_read_phy(bp, bp->mii_up1, &up1);
1084 if (!(up1 & BCM5708S_UP1_2G5)) {
1085 up1 |= BCM5708S_UP1_2G5;
1086 bnx2_write_phy(bp, bp->mii_up1, up1);
1090 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1103 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1106 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1107 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1109 bnx2_read_phy(bp, bp->mii_up1, &up1);
1110 if (up1 & BCM5708S_UP1_2G5) {
1111 up1 &= ~BCM5708S_UP1_2G5;
1112 bnx2_write_phy(bp, bp->mii_up1, up1);
1116 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1117 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1118 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1124 bnx2_enable_forced_2g5(struct bnx2 *bp)
1128 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1131 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1134 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1135 MII_BNX2_BLK_ADDR_SERDES_DIG);
1136 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1137 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1138 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1139 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1141 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1145 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147 bmcr |= BCM5708S_BMCR_FORCE_2500;
1150 if (bp->autoneg & AUTONEG_SPEED) {
1151 bmcr &= ~BMCR_ANENABLE;
1152 if (bp->req_duplex == DUPLEX_FULL)
1153 bmcr |= BMCR_FULLDPLX;
1155 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1159 bnx2_disable_forced_2g5(struct bnx2 *bp)
1163 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1166 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1169 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1170 MII_BNX2_BLK_ADDR_SERDES_DIG);
1171 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1172 val &= ~MII_BNX2_SD_MISC1_FORCE;
1173 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1175 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1179 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1180 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1181 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1184 if (bp->autoneg & AUTONEG_SPEED)
1185 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1186 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
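/* Re-check the PHY status, resolve speed/duplex/flow control and update the
 * MAC and reported link state accordingly.
 */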
1190 bnx2_set_link(struct bnx2 *bp)
1195 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1200 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1203 link_up = bp->link_up;
1205 bnx2_enable_bmsr1(bp);
1206 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1207 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1208 bnx2_disable_bmsr1(bp);
1210 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1211 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1214 val = REG_RD(bp, BNX2_EMAC_STATUS);
1215 if (val & BNX2_EMAC_STATUS_LINK)
1216 bmsr |= BMSR_LSTATUS;
1218 bmsr &= ~BMSR_LSTATUS;
1221 if (bmsr & BMSR_LSTATUS) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1226 bnx2_5706s_linkup(bp);
1227 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1228 bnx2_5708s_linkup(bp);
1229 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1230 bnx2_5709s_linkup(bp);
1233 bnx2_copper_linkup(bp);
1235 bnx2_resolve_flow_ctrl(bp);
1238 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1239 (bp->autoneg & AUTONEG_SPEED))
1240 bnx2_disable_forced_2g5(bp);
1242 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1246 if (bp->link_up != link_up) {
1247 bnx2_report_link(bp);
1250 bnx2_set_mac_link(bp);
1256 bnx2_reset_phy(struct bnx2 *bp)
1261 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1263 #define PHY_RESET_MAX_WAIT 100
1264 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1267 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1268 if (!(reg & BMCR_RESET)) {
1273 if (i == PHY_RESET_MAX_WAIT) {
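/* Translate the requested flow control settings into pause advertisement
 * bits, using the 1000X format for SerDes PHYs and the copper format
 * otherwise.
 */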
1280 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1284 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1285 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1287 if (bp->phy_flags & PHY_SERDES_FLAG) {
1288 adv = ADVERTISE_1000XPAUSE;
1291 adv = ADVERTISE_PAUSE_CAP;
1294 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1295 if (bp->phy_flags & PHY_SERDES_FLAG) {
1296 adv = ADVERTISE_1000XPSE_ASYM;
1299 adv = ADVERTISE_PAUSE_ASYM;
1302 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1303 if (bp->phy_flags & PHY_SERDES_FLAG) {
1304 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1307 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1313 static int bnx2_fw_sync(struct bnx2 *, u32, int);
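/* Encode the requested link settings into a SET_LINK command for the
 * management firmware, which controls the PHY on remote-PHY capable boards.
 */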
1316 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1318 u32 speed_arg = 0, pause_adv;
1320 pause_adv = bnx2_phy_get_pause_adv(bp);
1322 if (bp->autoneg & AUTONEG_SPEED) {
1323 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1324 if (bp->advertising & ADVERTISED_10baseT_Half)
1325 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1326 if (bp->advertising & ADVERTISED_10baseT_Full)
1327 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1328 if (bp->advertising & ADVERTISED_100baseT_Half)
1329 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1330 if (bp->advertising & ADVERTISED_100baseT_Full)
1331 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1332 if (bp->advertising & ADVERTISED_1000baseT_Full)
1333 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1334 if (bp->advertising & ADVERTISED_2500baseX_Full)
1335 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1337 if (bp->req_line_speed == SPEED_2500)
1338 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1339 else if (bp->req_line_speed == SPEED_1000)
1340 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1341 else if (bp->req_line_speed == SPEED_100) {
1342 if (bp->req_duplex == DUPLEX_FULL)
1343 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1345 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1346 } else if (bp->req_line_speed == SPEED_10) {
1347 if (bp->req_duplex == DUPLEX_FULL)
1348 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1350 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1354 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1355 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1356 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1357 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1359 if (port == PORT_TP)
1360 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1361 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1363 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1365 spin_unlock_bh(&bp->phy_lock);
1366 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1367 spin_lock_bh(&bp->phy_lock);
1373 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1378 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1379 return (bnx2_setup_remote_phy(bp, port));
1381 if (!(bp->autoneg & AUTONEG_SPEED)) {
1383 int force_link_down = 0;
1385 if (bp->req_line_speed == SPEED_2500) {
1386 if (!bnx2_test_and_enable_2g5(bp))
1387 force_link_down = 1;
1388 } else if (bp->req_line_speed == SPEED_1000) {
1389 if (bnx2_test_and_disable_2g5(bp))
1390 force_link_down = 1;
1392 bnx2_read_phy(bp, bp->mii_adv, &adv);
1393 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1395 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1396 new_bmcr = bmcr & ~BMCR_ANENABLE;
1397 new_bmcr |= BMCR_SPEED1000;
1399 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1400 if (bp->req_line_speed == SPEED_2500)
1401 bnx2_enable_forced_2g5(bp);
1402 else if (bp->req_line_speed == SPEED_1000) {
1403 bnx2_disable_forced_2g5(bp);
1404 new_bmcr &= ~0x2000;
1407 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1408 if (bp->req_line_speed == SPEED_2500)
1409 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1411 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1414 if (bp->req_duplex == DUPLEX_FULL) {
1415 adv |= ADVERTISE_1000XFULL;
1416 new_bmcr |= BMCR_FULLDPLX;
1419 adv |= ADVERTISE_1000XHALF;
1420 new_bmcr &= ~BMCR_FULLDPLX;
1422 if ((new_bmcr != bmcr) || (force_link_down)) {
1423 /* Force a link down visible on the other side */
1425 bnx2_write_phy(bp, bp->mii_adv, adv &
1426 ~(ADVERTISE_1000XFULL |
1427 ADVERTISE_1000XHALF));
1428 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1429 BMCR_ANRESTART | BMCR_ANENABLE);
1432 netif_carrier_off(bp->dev);
1433 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1434 bnx2_report_link(bp);
1436 bnx2_write_phy(bp, bp->mii_adv, adv);
1437 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1439 bnx2_resolve_flow_ctrl(bp);
1440 bnx2_set_mac_link(bp);
1445 bnx2_test_and_enable_2g5(bp);
1447 if (bp->advertising & ADVERTISED_1000baseT_Full)
1448 new_adv |= ADVERTISE_1000XFULL;
1450 new_adv |= bnx2_phy_get_pause_adv(bp);
1452 bnx2_read_phy(bp, bp->mii_adv, &adv);
1453 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1455 bp->serdes_an_pending = 0;
1456 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1457 /* Force a link down visible on the other side */
1459 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1460 spin_unlock_bh(&bp->phy_lock);
1462 spin_lock_bh(&bp->phy_lock);
1465 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1466 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1468 /* Speed up link-up time when the link partner
1469 * does not autonegotiate which is very common
1470 * in blade servers. Some blade servers use
1471 * IPMI for keyboard input and it's important
1472 * to minimize link disruptions.  Autonegotiation involves
1473 * exchanging base pages plus 3 next pages and
1474 * normally completes in about 120 msec.
 */
1476 bp->current_interval = SERDES_AN_TIMEOUT;
1477 bp->serdes_an_pending = 1;
1478 mod_timer(&bp->timer, jiffies + bp->current_interval);
1480 bnx2_resolve_flow_ctrl(bp);
1481 bnx2_set_mac_link(bp);
1487 #define ETHTOOL_ALL_FIBRE_SPEED \
1488 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1489 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1490 (ADVERTISED_1000baseT_Full)
1492 #define ETHTOOL_ALL_COPPER_SPEED \
1493 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1494 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1495 ADVERTISED_1000baseT_Full)
1497 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1498 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1500 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1503 bnx2_set_default_remote_link(struct bnx2 *bp)
1507 if (bp->phy_port == PORT_TP)
1508 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1510 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1512 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1513 bp->req_line_speed = 0;
1514 bp->autoneg |= AUTONEG_SPEED;
1515 bp->advertising = ADVERTISED_Autoneg;
1516 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1517 bp->advertising |= ADVERTISED_10baseT_Half;
1518 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1519 bp->advertising |= ADVERTISED_10baseT_Full;
1520 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1521 bp->advertising |= ADVERTISED_100baseT_Half;
1522 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1523 bp->advertising |= ADVERTISED_100baseT_Full;
1524 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1525 bp->advertising |= ADVERTISED_1000baseT_Full;
1526 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1527 bp->advertising |= ADVERTISED_2500baseX_Full;
1530 bp->advertising = 0;
1531 bp->req_duplex = DUPLEX_FULL;
1532 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1533 bp->req_line_speed = SPEED_10;
1534 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1535 bp->req_duplex = DUPLEX_HALF;
1537 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1538 bp->req_line_speed = SPEED_100;
1539 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1540 bp->req_duplex = DUPLEX_HALF;
1542 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1543 bp->req_line_speed = SPEED_1000;
1544 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1545 bp->req_line_speed = SPEED_2500;
1550 bnx2_set_default_link(struct bnx2 *bp)
1552 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1553 return bnx2_set_default_remote_link(bp);
1555 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1556 bp->req_line_speed = 0;
1557 if (bp->phy_flags & PHY_SERDES_FLAG) {
1560 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1562 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1563 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1564 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1566 bp->req_line_speed = bp->line_speed = SPEED_1000;
1567 bp->req_duplex = DUPLEX_FULL;
1570 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
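/* Write the driver pulse sequence to shared memory so the bootcode knows
 * the driver is still running.
 */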
1574 bnx2_send_heart_beat(struct bnx2 *bp)
1579 spin_lock(&bp->indirect_lock);
1580 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1581 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1582 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1583 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1584 spin_unlock(&bp->indirect_lock);
1588 bnx2_remote_phy_event(struct bnx2 *bp)
1591 u8 link_up = bp->link_up;
1594 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1596 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1597 bnx2_send_heart_beat(bp);
1599 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1601 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1607 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1608 bp->duplex = DUPLEX_FULL;
1610 case BNX2_LINK_STATUS_10HALF:
1611 bp->duplex = DUPLEX_HALF;
1612 case BNX2_LINK_STATUS_10FULL:
1613 bp->line_speed = SPEED_10;
1615 case BNX2_LINK_STATUS_100HALF:
1616 bp->duplex = DUPLEX_HALF;
1617 case BNX2_LINK_STATUS_100BASE_T4:
1618 case BNX2_LINK_STATUS_100FULL:
1619 bp->line_speed = SPEED_100;
1621 case BNX2_LINK_STATUS_1000HALF:
1622 bp->duplex = DUPLEX_HALF;
1623 case BNX2_LINK_STATUS_1000FULL:
1624 bp->line_speed = SPEED_1000;
1626 case BNX2_LINK_STATUS_2500HALF:
1627 bp->duplex = DUPLEX_HALF;
1628 case BNX2_LINK_STATUS_2500FULL:
1629 bp->line_speed = SPEED_2500;
1636 spin_lock(&bp->phy_lock);
1638 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1639 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1640 if (bp->duplex == DUPLEX_FULL)
1641 bp->flow_ctrl = bp->req_flow_ctrl;
1643 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1644 bp->flow_ctrl |= FLOW_CTRL_TX;
1645 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1646 bp->flow_ctrl |= FLOW_CTRL_RX;
1649 old_port = bp->phy_port;
1650 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1651 bp->phy_port = PORT_FIBRE;
1653 bp->phy_port = PORT_TP;
1655 if (old_port != bp->phy_port)
1656 bnx2_set_default_link(bp);
1658 spin_unlock(&bp->phy_lock);
1660 if (bp->link_up != link_up)
1661 bnx2_report_link(bp);
1663 bnx2_set_mac_link(bp);
1667 bnx2_set_remote_link(struct bnx2 *bp)
1671 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1673 case BNX2_FW_EVT_CODE_LINK_EVENT:
1674 bnx2_remote_phy_event(bp);
1676 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1678 bnx2_send_heart_beat(bp);
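/* Configure the copper PHY: program the autoneg advertisements, or force
 * the requested speed and duplex.
 */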
1685 bnx2_setup_copper_phy(struct bnx2 *bp)
1690 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1692 if (bp->autoneg & AUTONEG_SPEED) {
1693 u32 adv_reg, adv1000_reg;
1694 u32 new_adv_reg = 0;
1695 u32 new_adv1000_reg = 0;
1697 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1698 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1699 ADVERTISE_PAUSE_ASYM);
1701 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1702 adv1000_reg &= PHY_ALL_1000_SPEED;
1704 if (bp->advertising & ADVERTISED_10baseT_Half)
1705 new_adv_reg |= ADVERTISE_10HALF;
1706 if (bp->advertising & ADVERTISED_10baseT_Full)
1707 new_adv_reg |= ADVERTISE_10FULL;
1708 if (bp->advertising & ADVERTISED_100baseT_Half)
1709 new_adv_reg |= ADVERTISE_100HALF;
1710 if (bp->advertising & ADVERTISED_100baseT_Full)
1711 new_adv_reg |= ADVERTISE_100FULL;
1712 if (bp->advertising & ADVERTISED_1000baseT_Full)
1713 new_adv1000_reg |= ADVERTISE_1000FULL;
1715 new_adv_reg |= ADVERTISE_CSMA;
1717 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1719 if ((adv1000_reg != new_adv1000_reg) ||
1720 (adv_reg != new_adv_reg) ||
1721 ((bmcr & BMCR_ANENABLE) == 0)) {
1723 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1724 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1725 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1728 else if (bp->link_up) {
1729 /* Flow ctrl may have changed from auto to forced */
1730 /* or vice-versa. */
1732 bnx2_resolve_flow_ctrl(bp);
1733 bnx2_set_mac_link(bp);
1739 if (bp->req_line_speed == SPEED_100) {
1740 new_bmcr |= BMCR_SPEED100;
1742 if (bp->req_duplex == DUPLEX_FULL) {
1743 new_bmcr |= BMCR_FULLDPLX;
1745 if (new_bmcr != bmcr) {
1748 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1749 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1751 if (bmsr & BMSR_LSTATUS) {
1752 /* Force link down */
1753 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1754 spin_unlock_bh(&bp->phy_lock);
1756 spin_lock_bh(&bp->phy_lock);
1758 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1759 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1762 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1764 /* Normally, the new speed is set up after the link has
1765 * gone down and up again.  In some cases, link will not go
1766 * down so we need to set up the new speed here.
 */
1768 if (bmsr & BMSR_LSTATUS) {
1769 bp->line_speed = bp->req_line_speed;
1770 bp->duplex = bp->req_duplex;
1771 bnx2_resolve_flow_ctrl(bp);
1772 bnx2_set_mac_link(bp);
1775 bnx2_resolve_flow_ctrl(bp);
1776 bnx2_set_mac_link(bp);
1782 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1784 if (bp->loopback == MAC_LOOPBACK)
1787 if (bp->phy_flags & PHY_SERDES_FLAG) {
1788 return (bnx2_setup_serdes_phy(bp, port));
1791 return (bnx2_setup_copper_phy(bp));
1796 bnx2_init_5709s_phy(struct bnx2 *bp)
1800 bp->mii_bmcr = MII_BMCR + 0x10;
1801 bp->mii_bmsr = MII_BMSR + 0x10;
1802 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1803 bp->mii_adv = MII_ADVERTISE + 0x10;
1804 bp->mii_lpa = MII_LPA + 0x10;
1805 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1807 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1808 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1810 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1813 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1815 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1816 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1817 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1818 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1820 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1821 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1822 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1823 val |= BCM5708S_UP1_2G5;
1825 val &= ~BCM5708S_UP1_2G5;
1826 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1828 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1829 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1830 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1831 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1833 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1835 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1836 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1837 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1839 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1845 bnx2_init_5708s_phy(struct bnx2 *bp)
1851 bp->mii_up1 = BCM5708S_UP1;
1853 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1854 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1855 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1857 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1858 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1859 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1861 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1862 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1863 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1865 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1866 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1867 val |= BCM5708S_UP1_2G5;
1868 bnx2_write_phy(bp, BCM5708S_UP1, val);
1871 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1872 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1873 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1874 /* increase tx signal amplitude */
1875 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1876 BCM5708S_BLK_ADDR_TX_MISC);
1877 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1878 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1879 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1880 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1883 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1884 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1889 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1890 BNX2_SHARED_HW_CFG_CONFIG);
1891 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1892 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1893 BCM5708S_BLK_ADDR_TX_MISC);
1894 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1895 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1896 BCM5708S_BLK_ADDR_DIG);
1903 bnx2_init_5706s_phy(struct bnx2 *bp)
1907 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1909 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1910 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1912 if (bp->dev->mtu > 1500) {
1915 /* Set extended packet length bit */
1916 bnx2_write_phy(bp, 0x18, 0x7);
1917 bnx2_read_phy(bp, 0x18, &val);
1918 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1920 bnx2_write_phy(bp, 0x1c, 0x6c00);
1921 bnx2_read_phy(bp, 0x1c, &val);
1922 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1927 bnx2_write_phy(bp, 0x18, 0x7);
1928 bnx2_read_phy(bp, 0x18, &val);
1929 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1931 bnx2_write_phy(bp, 0x1c, 0x6c00);
1932 bnx2_read_phy(bp, 0x1c, &val);
1933 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1940 bnx2_init_copper_phy(struct bnx2 *bp)
1946 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1947 bnx2_write_phy(bp, 0x18, 0x0c00);
1948 bnx2_write_phy(bp, 0x17, 0x000a);
1949 bnx2_write_phy(bp, 0x15, 0x310b);
1950 bnx2_write_phy(bp, 0x17, 0x201f);
1951 bnx2_write_phy(bp, 0x15, 0x9506);
1952 bnx2_write_phy(bp, 0x17, 0x401f);
1953 bnx2_write_phy(bp, 0x15, 0x14e2);
1954 bnx2_write_phy(bp, 0x18, 0x0400);
1957 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1958 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1959 MII_BNX2_DSP_EXPAND_REG | 0x8);
1960 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1962 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1965 if (bp->dev->mtu > 1500) {
1966 /* Set extended packet length bit */
1967 bnx2_write_phy(bp, 0x18, 0x7);
1968 bnx2_read_phy(bp, 0x18, &val);
1969 bnx2_write_phy(bp, 0x18, val | 0x4000);
1971 bnx2_read_phy(bp, 0x10, &val);
1972 bnx2_write_phy(bp, 0x10, val | 0x1);
1975 bnx2_write_phy(bp, 0x18, 0x7);
1976 bnx2_read_phy(bp, 0x18, &val);
1977 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1979 bnx2_read_phy(bp, 0x10, &val);
1980 bnx2_write_phy(bp, 0x10, val & ~0x1);
1983 /* ethernet@wirespeed */
1984 bnx2_write_phy(bp, 0x18, 0x7007);
1985 bnx2_read_phy(bp, 0x18, &val);
1986 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1992 bnx2_init_phy(struct bnx2 *bp)
1997 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1998 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
2000 bp->mii_bmcr = MII_BMCR;
2001 bp->mii_bmsr = MII_BMSR;
2002 bp->mii_bmsr1 = MII_BMSR;
2003 bp->mii_adv = MII_ADVERTISE;
2004 bp->mii_lpa = MII_LPA;
2006 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2008 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
2011 bnx2_read_phy(bp, MII_PHYSID1, &val);
2012 bp->phy_id = val << 16;
2013 bnx2_read_phy(bp, MII_PHYSID2, &val);
2014 bp->phy_id |= val & 0xffff;
2016 if (bp->phy_flags & PHY_SERDES_FLAG) {
2017 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2018 rc = bnx2_init_5706s_phy(bp);
2019 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2020 rc = bnx2_init_5708s_phy(bp);
2021 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2022 rc = bnx2_init_5709s_phy(bp);
2025 rc = bnx2_init_copper_phy(bp);
2030 rc = bnx2_setup_phy(bp, bp->phy_port);
2036 bnx2_set_mac_loopback(struct bnx2 *bp)
2040 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2041 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2042 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2043 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2048 static int bnx2_test_link(struct bnx2 *);
2051 bnx2_set_phy_loopback(struct bnx2 *bp)
2056 spin_lock_bh(&bp->phy_lock);
2057 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2059 spin_unlock_bh(&bp->phy_lock);
2063 for (i = 0; i < 10; i++) {
2064 if (bnx2_test_link(bp) == 0)
2069 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2070 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2071 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2072 BNX2_EMAC_MODE_25G_MODE);
2074 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2075 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
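/* Post a message to the firmware mailbox and wait for the matching
 * sequence number to be acknowledged.
 */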
2081 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2087 msg_data |= bp->fw_wr_seq;
2089 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2091 /* wait for an acknowledgement. */
2092 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2095 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2097 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2100 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2103 /* If we timed out, inform the firmware that this is the case. */
2104 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2106 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2109 msg_data &= ~BNX2_DRV_MSG_CODE;
2110 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2112 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2117 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
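/* Initialize the 5709 host context memory: run MEM_INIT and then load the
 * page table with the DMA addresses of the context pages.
 */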
2124 bnx2_init_5709_context(struct bnx2 *bp)
2129 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2130 val |= (BCM_PAGE_BITS - 8) << 16;
2131 REG_WR(bp, BNX2_CTX_COMMAND, val);
2132 for (i = 0; i < 10; i++) {
2133 val = REG_RD(bp, BNX2_CTX_COMMAND);
2134 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2138 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2141 for (i = 0; i < bp->ctx_pages; i++) {
2144 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2145 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2146 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2147 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2148 (u64) bp->ctx_blk_mapping[i] >> 32);
2149 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2150 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2151 for (j = 0; j < 10; j++) {
2153 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2154 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2158 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2167 bnx2_init_context(struct bnx2 *bp)
2173 u32 vcid_addr, pcid_addr, offset;
2178 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2181 vcid_addr = GET_PCID_ADDR(vcid);
2183 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2188 pcid_addr = GET_PCID_ADDR(new_vcid);
2191 vcid_addr = GET_CID_ADDR(vcid);
2192 pcid_addr = vcid_addr;
2195 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2196 vcid_addr += (i << PHY_CTX_SHIFT);
2197 pcid_addr += (i << PHY_CTX_SHIFT);
2199 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2200 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2202 /* Zero out the context. */
2203 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2204 CTX_WR(bp, vcid_addr, offset, 0);
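/* Work around bad on-chip rx buffer memory: allocate every free buffer from
 * the RBUF pool, remember the good ones (bit 9 clear) and free only those
 * back, leaving the bad blocks permanently allocated.
 */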
2210 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2216 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2217 if (good_mbuf == NULL) {
2218 printk(KERN_ERR PFX "Failed to allocate memory in "
2219 "bnx2_alloc_bad_rbuf\n");
2223 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2224 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2228 /* Allocate a bunch of mbufs and save the good ones in an array. */
2229 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2230 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2231 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2233 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2235 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2237 /* The addresses with Bit 9 set are bad memory blocks. */
2238 if (!(val & (1 << 9))) {
2239 good_mbuf[good_mbuf_cnt] = (u16) val;
2243 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2246 /* Free the good ones back to the mbuf pool thus discarding
2247 * all the bad ones. */
2248 while (good_mbuf_cnt) {
2251 val = good_mbuf[good_mbuf_cnt];
2252 val = (val << 9) | val | 1;
2254 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2261 bnx2_set_mac_addr(struct bnx2 *bp)
2264 u8 *mac_addr = bp->dev->dev_addr;
2266 val = (mac_addr[0] << 8) | mac_addr[1];
2268 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2270 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2271 (mac_addr[4] << 8) | mac_addr[5];
2273 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
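/* Allocate and DMA-map a page for the rx page ring and point the
 * corresponding rx_bd at it.
 */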
2277 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2280 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2281 struct rx_bd *rxbd =
2282 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2283 struct page *page = alloc_page(GFP_ATOMIC);
2287 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2288 PCI_DMA_FROMDEVICE);
2290 pci_unmap_addr_set(rx_pg, mapping, mapping);
2291 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2292 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2297 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2299 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2300 struct page *page = rx_pg->page;
2305 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2306 PCI_DMA_FROMDEVICE);
2313 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2315 struct sk_buff *skb;
2316 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2318 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2319 unsigned long align;
2321 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2326 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2327 skb_reserve(skb, BNX2_RX_ALIGN - align);
2329 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2330 PCI_DMA_FROMDEVICE);
2333 pci_unmap_addr_set(rx_buf, mapping, mapping);
2335 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2336 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2338 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2344 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2346 struct status_block *sblk = bnapi->status_blk;
2347 u32 new_link_state, old_link_state;
2350 new_link_state = sblk->status_attn_bits & event;
2351 old_link_state = sblk->status_attn_bits_ack & event;
2352 if (new_link_state != old_link_state) {
2354 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2356 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2364 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2366 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2367 spin_lock(&bp->phy_lock);
2369 spin_unlock(&bp->phy_lock);
2371 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2372 bnx2_set_remote_link(bp);
2377 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2381 if (bnapi->int_num == 0)
2382 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2384 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2386 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
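/* Reclaim tx buffers up to the hardware consumer index, unmapping their DMA
 * buffers, and wake the tx queue once enough descriptors are free.
 */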
2392 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2394 u16 hw_cons, sw_cons, sw_ring_cons;
2396 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2397 sw_cons = bnapi->tx_cons;
2399 while (sw_cons != hw_cons) {
2400 struct sw_bd *tx_buf;
2401 struct sk_buff *skb;
2404 sw_ring_cons = TX_RING_IDX(sw_cons);
2406 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2409 /* partial BD completions possible with TSO packets */
2410 if (skb_is_gso(skb)) {
2411 u16 last_idx, last_ring_idx;
2413 last_idx = sw_cons +
2414 skb_shinfo(skb)->nr_frags + 1;
2415 last_ring_idx = sw_ring_cons +
2416 skb_shinfo(skb)->nr_frags + 1;
2417 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2420 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2425 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2426 skb_headlen(skb), PCI_DMA_TODEVICE);
2429 last = skb_shinfo(skb)->nr_frags;
2431 for (i = 0; i < last; i++) {
2432 sw_cons = NEXT_TX_BD(sw_cons);
2434 pci_unmap_page(bp->pdev,
2436 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2438 skb_shinfo(skb)->frags[i].size,
2442 sw_cons = NEXT_TX_BD(sw_cons);
2446 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2449 bnapi->hw_tx_cons = hw_cons;
2450 bnapi->tx_cons = sw_cons;
2451 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2452 * before checking for netif_queue_stopped(). Without the
2453 * memory barrier, there is a small possibility that bnx2_start_xmit()
2454 * will miss it and cause the queue to be stopped forever. */
2458 if (unlikely(netif_queue_stopped(bp->dev)) &&
2459 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2460 netif_tx_lock(bp->dev);
2461 if ((netif_queue_stopped(bp->dev)) &&
2462 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2463 netif_wake_queue(bp->dev);
2464 netif_tx_unlock(bp->dev);
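/*
 * Illustrative sketch, not part of the driver: the ordering that the
 * comment above relies on.  One side publishes the new consumer index and
 * only then checks the "queue stopped" state; the other side stops the
 * queue and only then re-reads the free count.  With a full barrier on
 * both sides, at least one of them must observe the other's update, so the
 * queue cannot stay stopped forever.  struct demo_txr and its fields are
 * hypothetical stand-ins, not struct bnx2 members.
 */
struct demo_txr {
	u16 prod;		/* written by the transmit path   */
	u16 cons;		/* written by the completion path */
	int stopped;		/* "queue stopped" flag           */
};

static u16 demo_txr_avail(struct demo_txr *r, u16 size)
{
	return size - (u16) (r->prod - r->cons);
}

static void demo_txr_complete(struct demo_txr *r, u16 new_cons,
			      u16 size, u16 wake_thresh)
{
	r->cons = new_cons;		/* publish the new consumer index */
	smp_mb();			/* order the store before the check */
	if (r->stopped && demo_txr_avail(r, size) > wake_thresh)
		r->stopped = 0;		/* wake the queue */
}

static void demo_txr_stop(struct demo_txr *r, u16 size, u16 wake_thresh)
{
	r->stopped = 1;			/* stop the queue */
	smp_mb();			/* order the store before the re-check */
	if (demo_txr_avail(r, size) > wake_thresh)
		r->stopped = 0;		/* a completion raced in; undo */
}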
2469 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2470 struct sk_buff *skb, int count)
2472 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2473 struct rx_bd *cons_bd, *prod_bd;
2476 u16 hw_prod = bnapi->rx_pg_prod, prod;
2477 u16 cons = bnapi->rx_pg_cons;
2479 for (i = 0; i < count; i++) {
2480 prod = RX_PG_RING_IDX(hw_prod);
2482 prod_rx_pg = &bp->rx_pg_ring[prod];
2483 cons_rx_pg = &bp->rx_pg_ring[cons];
2484 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2485 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2487 if (i == 0 && skb) {
2489 struct skb_shared_info *shinfo;
2491 shinfo = skb_shinfo(skb);
2493 page = shinfo->frags[shinfo->nr_frags].page;
2494 shinfo->frags[shinfo->nr_frags].page = NULL;
2495 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2496 PCI_DMA_FROMDEVICE);
2497 cons_rx_pg->page = page;
2498 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2502 prod_rx_pg->page = cons_rx_pg->page;
2503 cons_rx_pg->page = NULL;
2504 pci_unmap_addr_set(prod_rx_pg, mapping,
2505 pci_unmap_addr(cons_rx_pg, mapping));
2507 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2508 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2511 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2512 hw_prod = NEXT_RX_BD(hw_prod);
2514 bnapi->rx_pg_prod = hw_prod;
2515 bnapi->rx_pg_cons = cons;
2519 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2522 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2523 struct rx_bd *cons_bd, *prod_bd;
2525 cons_rx_buf = &bp->rx_buf_ring[cons];
2526 prod_rx_buf = &bp->rx_buf_ring[prod];
2528 pci_dma_sync_single_for_device(bp->pdev,
2529 pci_unmap_addr(cons_rx_buf, mapping),
2530 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2532 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2534 prod_rx_buf->skb = skb;
2539 pci_unmap_addr_set(prod_rx_buf, mapping,
2540 pci_unmap_addr(cons_rx_buf, mapping));
2542 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2543 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2544 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2545 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2549 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2550 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2554 u16 prod = ring_idx & 0xffff;
2556 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2557 if (unlikely(err)) {
2558 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2560 unsigned int raw_len = len + 4;
2561 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2563 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
2568 skb_reserve(skb, bp->rx_offset);
2569 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2570 PCI_DMA_FROMDEVICE);
2576 unsigned int i, frag_len, frag_size, pages;
2577 struct sw_pg *rx_pg;
2578 u16 pg_cons = bnapi->rx_pg_cons;
2579 u16 pg_prod = bnapi->rx_pg_prod;
2581 frag_size = len + 4 - hdr_len;
2582 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2583 skb_put(skb, hdr_len);
2585 for (i = 0; i < pages; i++) {
2586 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2587 if (unlikely(frag_len <= 4)) {
2588 unsigned int tail = 4 - frag_len;
2590 bnapi->rx_pg_cons = pg_cons;
2591 bnapi->rx_pg_prod = pg_prod;
2592 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2599 &skb_shinfo(skb)->frags[i - 1];
2601 skb->data_len -= tail;
2602 skb->truesize -= tail;
2606 rx_pg = &bp->rx_pg_ring[pg_cons];
2608 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2609 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2614 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2617 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2618 if (unlikely(err)) {
2619 bnapi->rx_pg_cons = pg_cons;
2620 bnapi->rx_pg_prod = pg_prod;
2621 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2626 frag_size -= frag_len;
2627 skb->data_len += frag_len;
2628 skb->truesize += frag_len;
2629 skb->len += frag_len;
2631 pg_prod = NEXT_RX_BD(pg_prod);
2632 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2634 bnapi->rx_pg_prod = pg_prod;
2635 bnapi->rx_pg_cons = pg_cons;
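/*
 * Illustrative sketch, not part of the driver: how many whole pages the
 * non-header part of a jumbo frame occupies, mirroring the
 * "PAGE_ALIGN(frag_size) >> PAGE_SHIFT" step above.  With 4K pages, a
 * frame that leaves 8762 bytes past the header buffer needs 3 pages
 * (numbers are examples only).
 */
static unsigned int demo_rx_pages_needed(unsigned int pkt_len,
					 unsigned int hdr_len)
{
	unsigned int frag_size = pkt_len + 4 - hdr_len;	/* +4 for the CRC */

	return PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
}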
2641 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2643 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2645 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2651 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2653 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2654 struct l2_fhdr *rx_hdr;
2655 int rx_pkt = 0, pg_ring_used = 0;
2657 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2658 sw_cons = bnapi->rx_cons;
2659 sw_prod = bnapi->rx_prod;
2661 /* Memory barrier necessary as speculative reads of the rx
2662 * buffer can be ahead of the index in the status block. */
2665 while (sw_cons != hw_cons) {
2666 unsigned int len, hdr_len;
2668 struct sw_bd *rx_buf;
2669 struct sk_buff *skb;
2670 dma_addr_t dma_addr;
2672 sw_ring_cons = RX_RING_IDX(sw_cons);
2673 sw_ring_prod = RX_RING_IDX(sw_prod);
2675 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2680 dma_addr = pci_unmap_addr(rx_buf, mapping);
2682 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2683 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2685 rx_hdr = (struct l2_fhdr *) skb->data;
2686 len = rx_hdr->l2_fhdr_pkt_len;
2688 if ((status = rx_hdr->l2_fhdr_status) &
2689 (L2_FHDR_ERRORS_BAD_CRC |
2690 L2_FHDR_ERRORS_PHY_DECODE |
2691 L2_FHDR_ERRORS_ALIGNMENT |
2692 L2_FHDR_ERRORS_TOO_SHORT |
2693 L2_FHDR_ERRORS_GIANT_FRAME)) {
2695 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2700 if (status & L2_FHDR_STATUS_SPLIT) {
2701 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2703 } else if (len > bp->rx_jumbo_thresh) {
2704 hdr_len = bp->rx_jumbo_thresh;
2710 if (len <= bp->rx_copy_thresh) {
2711 struct sk_buff *new_skb;
2713 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2714 if (new_skb == NULL) {
2715 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2721 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2722 new_skb->data, len + 2);
2723 skb_reserve(new_skb, 2);
2724 skb_put(new_skb, len);
2726 bnx2_reuse_rx_skb(bp, bnapi, skb,
2727 sw_ring_cons, sw_ring_prod);
2730 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2731 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2734 skb->protocol = eth_type_trans(skb, bp->dev);
2736 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2737 (ntohs(skb->protocol) != 0x8100)) {
2744 skb->ip_summed = CHECKSUM_NONE;
2746 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2747 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2749 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2750 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2751 skb->ip_summed = CHECKSUM_UNNECESSARY;
2755 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2756 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2757 rx_hdr->l2_fhdr_vlan_tag);
2761 netif_receive_skb(skb);
2763 bp->dev->last_rx = jiffies;
2767 sw_cons = NEXT_RX_BD(sw_cons);
2768 sw_prod = NEXT_RX_BD(sw_prod);
2770 if (rx_pkt == budget)
2773 /* Refresh hw_cons to see if there is new work */
2774 if (sw_cons == hw_cons) {
2775 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2779 bnapi->rx_cons = sw_cons;
2780 bnapi->rx_prod = sw_prod;
2783 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2786 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2788 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2796 /* MSI ISR - The only difference between this and the INTx ISR
2797 * is that the MSI interrupt is always serviced. */
2800 bnx2_msi(int irq, void *dev_instance)
2802 struct net_device *dev = dev_instance;
2803 struct bnx2 *bp = netdev_priv(dev);
2804 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2806 prefetch(bnapi->status_blk);
2807 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2808 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2809 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2811 /* Return here if interrupt is disabled. */
2812 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2815 netif_rx_schedule(dev, &bnapi->napi);
2821 bnx2_msi_1shot(int irq, void *dev_instance)
2823 struct net_device *dev = dev_instance;
2824 struct bnx2 *bp = netdev_priv(dev);
2825 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2827 prefetch(bnapi->status_blk);
2829 /* Return here if interrupt is disabled. */
2830 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2833 netif_rx_schedule(dev, &bnapi->napi);
2839 bnx2_interrupt(int irq, void *dev_instance)
2841 struct net_device *dev = dev_instance;
2842 struct bnx2 *bp = netdev_priv(dev);
2843 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2844 struct status_block *sblk = bnapi->status_blk;
2846 /* When using INTx, it is possible for the interrupt to arrive
2847 * at the CPU before the status block that was posted just prior to the
2848 * interrupt. Reading a register will flush the status block.
2849 * When using MSI, the MSI message will always complete after
2850 * the status block write. */
2852 if ((sblk->status_idx == bnapi->last_status_idx) &&
2853 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2854 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2857 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2858 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2859 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2861 /* Read back to deassert IRQ immediately to avoid too many
2862 * spurious interrupts. */
2864 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2866 /* Return here if interrupt is shared and is disabled. */
2867 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2870 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2871 bnapi->last_status_idx = sblk->status_idx;
2872 __netif_rx_schedule(dev, &bnapi->napi);
2878 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2879 STATUS_ATTN_BITS_TIMER_ABORT)
2882 bnx2_has_work(struct bnx2_napi *bnapi)
2884 struct bnx2 *bp = bnapi->bp;
2885 struct status_block *sblk = bp->status_blk;
2887 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2888 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2891 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2892 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2898 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
2899 int work_done, int budget)
2901 struct status_block *sblk = bnapi->status_blk;
2902 u32 status_attn_bits = sblk->status_attn_bits;
2903 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2905 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2906 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2908 bnx2_phy_int(bp, bnapi);
2910 /* This is needed to take care of transient status
2911 * during link changes. */
2913 REG_WR(bp, BNX2_HC_COMMAND,
2914 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2915 REG_RD(bp, BNX2_HC_COMMAND);
2918 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
2919 bnx2_tx_int(bp, bnapi);
2921 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
2922 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
2927 static int bnx2_poll(struct napi_struct *napi, int budget)
2929 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
2930 struct bnx2 *bp = bnapi->bp;
2932 struct status_block *sblk = bnapi->status_blk;
2935 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
2937 if (unlikely(work_done >= budget))
2940 /* bnapi->last_status_idx is used below to tell the hw how
2941 * much work has been processed, so we must read it before
2942 * checking for more work. */
2944 bnapi->last_status_idx = sblk->status_idx;
2946 if (likely(!bnx2_has_work(bnapi))) {
2947 netif_rx_complete(bp->dev, napi);
2948 if (likely(bp->flags & USING_MSI_OR_MSIX_FLAG)) {
2949 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2950 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2951 bnapi->last_status_idx);
2954 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2955 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2956 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2957 bnapi->last_status_idx);
2959 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2960 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2961 bnapi->last_status_idx);
2969 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2970 * from set_multicast. */
2973 bnx2_set_rx_mode(struct net_device *dev)
2975 struct bnx2 *bp = netdev_priv(dev);
2976 u32 rx_mode, sort_mode;
2979 spin_lock_bh(&bp->phy_lock);
2981 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2982 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2983 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2985 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2986 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2988 if (!(bp->flags & ASF_ENABLE_FLAG))
2989 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2991 if (dev->flags & IFF_PROMISC) {
2992 /* Promiscuous mode. */
2993 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2994 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2995 BNX2_RPM_SORT_USER0_PROM_VLAN;
2997 else if (dev->flags & IFF_ALLMULTI) {
2998 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2999 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3002 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3005 /* Accept one or more multicast addresses. */
3006 struct dev_mc_list *mclist;
3007 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3012 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3014 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3015 i++, mclist = mclist->next) {
3017 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3019 regidx = (bit & 0xe0) >> 5;
3021 mc_filter[regidx] |= (1 << bit);
3024 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3025 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3029 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3032 if (rx_mode != bp->rx_mode) {
3033 bp->rx_mode = rx_mode;
3034 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3037 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3038 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3039 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3041 spin_unlock_bh(&bp->phy_lock);
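/*
 * Illustrative sketch, not part of the driver: deriving a multicast hash
 * register index and bit from the little-endian Ethernet CRC of the
 * address, as the loop above does with ether_crc_le().  The "bit = crc &
 * 0xff" step is not visible in the listing above and is assumed here; the
 * CRC helper mirrors the reflected CRC-32 (polynomial 0xedb88320, no final
 * inversion) that ether_crc_le() computes.
 */
static u32 demo_ether_crc_le(int len, const u8 *data)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

static void demo_mc_hash_set(const u8 *addr, u32 *mc_filter)
{
	u32 crc = demo_ether_crc_le(ETH_ALEN, addr);
	u32 bit = crc & 0xff;			/* assumed: low byte of the CRC   */
	u32 regidx = (bit & 0xe0) >> 5;		/* top 3 bits: one of 8 registers */

	bit &= 0x1f;				/* low 5 bits: bit in the register */
	mc_filter[regidx] |= (1 << bit);
}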
3045 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
3052 for (i = 0; i < rv2p_code_len; i += 8) {
3053 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
3055 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
3058 if (rv2p_proc == RV2P_PROC1) {
3059 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3060 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3063 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3064 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3068 /* Reset the processor; it is un-stalled later. */
3069 if (rv2p_proc == RV2P_PROC1) {
3070 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3073 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3078 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3085 val = REG_RD_IND(bp, cpu_reg->mode);
3086 val |= cpu_reg->mode_value_halt;
3087 REG_WR_IND(bp, cpu_reg->mode, val);
3088 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3090 /* Load the Text area. */
3091 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3095 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3100 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3101 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
3105 /* Load the Data area. */
3106 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3110 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3111 REG_WR_IND(bp, offset, fw->data[j]);
3115 /* Load the SBSS area. */
3116 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3120 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3121 REG_WR_IND(bp, offset, 0);
3125 /* Load the BSS area. */
3126 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3130 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3131 REG_WR_IND(bp, offset, 0);
3135 /* Load the Read-Only area. */
3136 offset = cpu_reg->spad_base +
3137 (fw->rodata_addr - cpu_reg->mips_view_base);
3141 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3142 REG_WR_IND(bp, offset, fw->rodata[j]);
3146 /* Clear the pre-fetch instruction. */
3147 REG_WR_IND(bp, cpu_reg->inst, 0);
3148 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3150 /* Start the CPU. */
3151 val = REG_RD_IND(bp, cpu_reg->mode);
3152 val &= ~cpu_reg->mode_value_halt;
3153 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3154 REG_WR_IND(bp, cpu_reg->mode, val);
3160 bnx2_init_cpus(struct bnx2 *bp)
3162 struct cpu_reg cpu_reg;
3167 /* Initialize the RV2P processor. */
3168 text = vmalloc(FW_BUF_SIZE);
3171 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3172 rv2p = bnx2_xi_rv2p_proc1;
3173 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3175 rv2p = bnx2_rv2p_proc1;
3176 rv2p_len = sizeof(bnx2_rv2p_proc1);
3178 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3182 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3184 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3185 rv2p = bnx2_xi_rv2p_proc2;
3186 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3188 rv2p = bnx2_rv2p_proc2;
3189 rv2p_len = sizeof(bnx2_rv2p_proc2);
3191 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3195 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3197 /* Initialize the RX Processor. */
3198 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3199 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3200 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3201 cpu_reg.state = BNX2_RXP_CPU_STATE;
3202 cpu_reg.state_value_clear = 0xffffff;
3203 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3204 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3205 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3206 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3207 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3208 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3209 cpu_reg.mips_view_base = 0x8000000;
3211 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3212 fw = &bnx2_rxp_fw_09;
3214 fw = &bnx2_rxp_fw_06;
3217 rc = load_cpu_fw(bp, &cpu_reg, fw);
3221 /* Initialize the TX Processor. */
3222 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3223 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3224 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3225 cpu_reg.state = BNX2_TXP_CPU_STATE;
3226 cpu_reg.state_value_clear = 0xffffff;
3227 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3228 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3229 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3230 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3231 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3232 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3233 cpu_reg.mips_view_base = 0x8000000;
3235 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3236 fw = &bnx2_txp_fw_09;
3238 fw = &bnx2_txp_fw_06;
3241 rc = load_cpu_fw(bp, &cpu_reg, fw);
3245 /* Initialize the TX Patch-up Processor. */
3246 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3247 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3248 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3249 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3250 cpu_reg.state_value_clear = 0xffffff;
3251 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3252 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3253 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3254 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3255 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3256 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3257 cpu_reg.mips_view_base = 0x8000000;
3259 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3260 fw = &bnx2_tpat_fw_09;
3262 fw = &bnx2_tpat_fw_06;
3265 rc = load_cpu_fw(bp, &cpu_reg, fw);
3269 /* Initialize the Completion Processor. */
3270 cpu_reg.mode = BNX2_COM_CPU_MODE;
3271 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3272 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3273 cpu_reg.state = BNX2_COM_CPU_STATE;
3274 cpu_reg.state_value_clear = 0xffffff;
3275 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3276 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3277 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3278 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3279 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3280 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3281 cpu_reg.mips_view_base = 0x8000000;
3283 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3284 fw = &bnx2_com_fw_09;
3286 fw = &bnx2_com_fw_06;
3289 rc = load_cpu_fw(bp, &cpu_reg, fw);
3293 /* Initialize the Command Processor. */
3294 cpu_reg.mode = BNX2_CP_CPU_MODE;
3295 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3296 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3297 cpu_reg.state = BNX2_CP_CPU_STATE;
3298 cpu_reg.state_value_clear = 0xffffff;
3299 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3300 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3301 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3302 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3303 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3304 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3305 cpu_reg.mips_view_base = 0x8000000;
3307 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3308 fw = &bnx2_cp_fw_09;
3310 fw = &bnx2_cp_fw_06;
3313 rc = load_cpu_fw(bp, &cpu_reg, fw);
3321 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3325 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3331 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3332 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3333 PCI_PM_CTRL_PME_STATUS);
3335 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3336 /* delay required during transition out of D3hot */
3339 val = REG_RD(bp, BNX2_EMAC_MODE);
3340 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3341 val &= ~BNX2_EMAC_MODE_MPKT;
3342 REG_WR(bp, BNX2_EMAC_MODE, val);
3344 val = REG_RD(bp, BNX2_RPM_CONFIG);
3345 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3346 REG_WR(bp, BNX2_RPM_CONFIG, val);
3357 autoneg = bp->autoneg;
3358 advertising = bp->advertising;
3360 if (bp->phy_port == PORT_TP) {
3361 bp->autoneg = AUTONEG_SPEED;
3362 bp->advertising = ADVERTISED_10baseT_Half |
3363 ADVERTISED_10baseT_Full |
3364 ADVERTISED_100baseT_Half |
3365 ADVERTISED_100baseT_Full |
3369 spin_lock_bh(&bp->phy_lock);
3370 bnx2_setup_phy(bp, bp->phy_port);
3371 spin_unlock_bh(&bp->phy_lock);
3373 bp->autoneg = autoneg;
3374 bp->advertising = advertising;
3376 bnx2_set_mac_addr(bp);
3378 val = REG_RD(bp, BNX2_EMAC_MODE);
3380 /* Enable port mode. */
3381 val &= ~BNX2_EMAC_MODE_PORT;
3382 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3383 BNX2_EMAC_MODE_ACPI_RCVD |
3384 BNX2_EMAC_MODE_MPKT;
3385 if (bp->phy_port == PORT_TP)
3386 val |= BNX2_EMAC_MODE_PORT_MII;
3388 val |= BNX2_EMAC_MODE_PORT_GMII;
3389 if (bp->line_speed == SPEED_2500)
3390 val |= BNX2_EMAC_MODE_25G_MODE;
3393 REG_WR(bp, BNX2_EMAC_MODE, val);
3395 /* Receive all multicast frames. */
3396 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3397 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3400 REG_WR(bp, BNX2_EMAC_RX_MODE,
3401 BNX2_EMAC_RX_MODE_SORT_MODE);
3403 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3404 BNX2_RPM_SORT_USER0_MC_EN;
3405 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3406 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3407 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3408 BNX2_RPM_SORT_USER0_ENA);
3410 /* Need to enable EMAC and RPM for WOL. */
3411 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3412 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3413 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3414 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3416 val = REG_RD(bp, BNX2_RPM_CONFIG);
3417 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3418 REG_WR(bp, BNX2_RPM_CONFIG, val);
3420 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3423 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3426 if (!(bp->flags & NO_WOL_FLAG))
3427 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3429 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3430 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3431 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3440 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3442 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3445 /* No more memory access after this point until
3446 * device is brought back to D0. */
3458 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3463 /* Request access to the flash interface. */
3464 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3465 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3466 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3467 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3473 if (j >= NVRAM_TIMEOUT_COUNT)
3480 bnx2_release_nvram_lock(struct bnx2 *bp)
3485 /* Relinquish nvram interface. */
3486 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3488 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3489 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3490 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3496 if (j >= NVRAM_TIMEOUT_COUNT)
3504 bnx2_enable_nvram_write(struct bnx2 *bp)
3508 val = REG_RD(bp, BNX2_MISC_CFG);
3509 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3511 if (bp->flash_info->flags & BNX2_NV_WREN) {
3514 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3515 REG_WR(bp, BNX2_NVM_COMMAND,
3516 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3518 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3521 val = REG_RD(bp, BNX2_NVM_COMMAND);
3522 if (val & BNX2_NVM_COMMAND_DONE)
3526 if (j >= NVRAM_TIMEOUT_COUNT)
3533 bnx2_disable_nvram_write(struct bnx2 *bp)
3537 val = REG_RD(bp, BNX2_MISC_CFG);
3538 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3543 bnx2_enable_nvram_access(struct bnx2 *bp)
3547 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3548 /* Enable both bits, even on read. */
3549 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3550 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3554 bnx2_disable_nvram_access(struct bnx2 *bp)
3558 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3559 /* Disable both bits, even after read. */
3560 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3561 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3562 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3566 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3571 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3572 /* Buffered flash, no erase needed */
3575 /* Build an erase command */
3576 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3577 BNX2_NVM_COMMAND_DOIT;
3579 /* Need to clear DONE bit separately. */
3580 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3582 /* Address of the NVRAM to read from. */
3583 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3585 /* Issue an erase command. */
3586 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3588 /* Wait for completion. */
3589 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3594 val = REG_RD(bp, BNX2_NVM_COMMAND);
3595 if (val & BNX2_NVM_COMMAND_DONE)
3599 if (j >= NVRAM_TIMEOUT_COUNT)
3606 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3611 /* Build the command word. */
3612 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3614 /* Calculate the offset within a buffered flash; not needed on the 5709. */
3615 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3616 offset = ((offset / bp->flash_info->page_size) <<
3617 bp->flash_info->page_bits) +
3618 (offset % bp->flash_info->page_size);
3621 /* Need to clear DONE bit separately. */
3622 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3624 /* Address of the NVRAM to read from. */
3625 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3627 /* Issue a read command. */
3628 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3630 /* Wait for completion. */
3631 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3636 val = REG_RD(bp, BNX2_NVM_COMMAND);
3637 if (val & BNX2_NVM_COMMAND_DONE) {
3638 val = REG_RD(bp, BNX2_NVM_READ);
3640 val = be32_to_cpu(val);
3641 memcpy(ret_val, &val, 4);
3645 if (j >= NVRAM_TIMEOUT_COUNT)
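/*
 * Illustrative sketch, not part of the driver: the page/offset translation
 * applied a few lines above for flash parts that need it.  page_size = 256
 * and page_bits = 9 are hypothetical values; with them a linear offset of
 * 0x103 maps to ((0x103 / 256) << 9) + (0x103 % 256) = 0x203.
 */
static u32 demo_flash_translate(u32 offset, u32 page_size, u32 page_bits)
{
	return ((offset / page_size) << page_bits) + (offset % page_size);
}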
3653 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3658 /* Build the command word. */
3659 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3661 /* Calculate the offset within a buffered flash; not needed on the 5709. */
3662 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3663 offset = ((offset / bp->flash_info->page_size) <<
3664 bp->flash_info->page_bits) +
3665 (offset % bp->flash_info->page_size);
3668 /* Need to clear DONE bit separately. */
3669 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3671 memcpy(&val32, val, 4);
3672 val32 = cpu_to_be32(val32);
3674 /* Write the data. */
3675 REG_WR(bp, BNX2_NVM_WRITE, val32);
3677 /* Address of the NVRAM to write to. */
3678 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3680 /* Issue the write command. */
3681 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3683 /* Wait for completion. */
3684 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3687 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3690 if (j >= NVRAM_TIMEOUT_COUNT)
3697 bnx2_init_nvram(struct bnx2 *bp)
3700 int j, entry_count, rc = 0;
3701 struct flash_spec *flash;
3703 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3704 bp->flash_info = &flash_5709;
3705 goto get_flash_size;
3708 /* Determine the selected interface. */
3709 val = REG_RD(bp, BNX2_NVM_CFG1);
3711 entry_count = ARRAY_SIZE(flash_table);
3713 if (val & 0x40000000) {
3715 /* Flash interface has been reconfigured */
3716 for (j = 0, flash = &flash_table[0]; j < entry_count;
3718 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3719 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3720 bp->flash_info = flash;
3727 /* Not yet reconfigured */
3729 if (val & (1 << 23))
3730 mask = FLASH_BACKUP_STRAP_MASK;
3732 mask = FLASH_STRAP_MASK;
3734 for (j = 0, flash = &flash_table[0]; j < entry_count;
3737 if ((val & mask) == (flash->strapping & mask)) {
3738 bp->flash_info = flash;
3740 /* Request access to the flash interface. */
3741 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3744 /* Enable access to flash interface */
3745 bnx2_enable_nvram_access(bp);
3747 /* Reconfigure the flash interface */
3748 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3749 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3750 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3751 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3753 /* Disable access to flash interface */
3754 bnx2_disable_nvram_access(bp);
3755 bnx2_release_nvram_lock(bp);
3760 } /* if (val & 0x40000000) */
3762 if (j == entry_count) {
3763 bp->flash_info = NULL;
3764 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3769 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3770 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3772 bp->flash_size = val;
3774 bp->flash_size = bp->flash_info->total_size;
3780 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3784 u32 cmd_flags, offset32, len32, extra;
3789 /* Request access to the flash interface. */
3790 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3793 /* Enable access to flash interface */
3794 bnx2_enable_nvram_access(bp);
3807 pre_len = 4 - (offset & 3);
3809 if (pre_len >= len32) {
3811 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3812 BNX2_NVM_COMMAND_LAST;
3815 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3818 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3823 memcpy(ret_buf, buf + (offset & 3), pre_len);
3830 extra = 4 - (len32 & 3);
3831 len32 = (len32 + 4) & ~3;
3838 cmd_flags = BNX2_NVM_COMMAND_LAST;
3840 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3841 BNX2_NVM_COMMAND_LAST;
3843 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3845 memcpy(ret_buf, buf, 4 - extra);
3847 else if (len32 > 0) {
3850 /* Read the first word. */
3854 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3856 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3858 /* Advance to the next dword. */
3863 while (len32 > 4 && rc == 0) {
3864 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3866 /* Advance to the next dword. */
3875 cmd_flags = BNX2_NVM_COMMAND_LAST;
3876 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3878 memcpy(ret_buf, buf, 4 - extra);
3881 /* Disable access to flash interface */
3882 bnx2_disable_nvram_access(bp);
3884 bnx2_release_nvram_lock(bp);
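/*
 * Illustrative sketch, not part of the driver: the NVRAM interface moves
 * whole aligned dwords, so bnx2_nvram_read() above widens an arbitrary
 * (offset, len) request to dword boundaries and throws away the unwanted
 * leading/trailing bytes afterwards.  This is a simplified equivalent of
 * the pre_len/extra bookkeeping; e.g. offset = 6, len = 5 becomes a read
 * of the dwords at 4 and 8, keeping bytes 6..10.
 */
static void demo_nvram_align(u32 offset, u32 len,
			     u32 *first_dword, u32 *dword_count)
{
	u32 start = offset & ~3;		/* round the start down */
	u32 end = (offset + len + 3) & ~3;	/* round the end up     */

	*first_dword = start;
	*dword_count = (end - start) / 4;
}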
3890 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3893 u32 written, offset32, len32;
3894 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3896 int align_start, align_end;
3901 align_start = align_end = 0;
3903 if ((align_start = (offset32 & 3))) {
3905 len32 += align_start;
3908 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3913 align_end = 4 - (len32 & 3);
3915 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3919 if (align_start || align_end) {
3920 align_buf = kmalloc(len32, GFP_KERNEL);
3921 if (align_buf == NULL)
3924 memcpy(align_buf, start, 4);
3927 memcpy(align_buf + len32 - 4, end, 4);
3929 memcpy(align_buf + align_start, data_buf, buf_size);
3933 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3934 flash_buffer = kmalloc(264, GFP_KERNEL);
3935 if (flash_buffer == NULL) {
3937 goto nvram_write_end;
3942 while ((written < len32) && (rc == 0)) {
3943 u32 page_start, page_end, data_start, data_end;
3944 u32 addr, cmd_flags;
3947 /* Find the page_start addr */
3948 page_start = offset32 + written;
3949 page_start -= (page_start % bp->flash_info->page_size);
3950 /* Find the page_end addr */
3951 page_end = page_start + bp->flash_info->page_size;
3952 /* Find the data_start addr */
3953 data_start = (written == 0) ? offset32 : page_start;
3954 /* Find the data_end addr */
3955 data_end = (page_end > offset32 + len32) ?
3956 (offset32 + len32) : page_end;
3958 /* Request access to the flash interface. */
3959 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3960 goto nvram_write_end;
3962 /* Enable access to flash interface */
3963 bnx2_enable_nvram_access(bp);
3965 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3966 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3969 /* Read the whole page into the buffer
3970 * (non-buffered flash only) */
3971 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3972 if (j == (bp->flash_info->page_size - 4)) {
3973 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3975 rc = bnx2_nvram_read_dword(bp,
3981 goto nvram_write_end;
3987 /* Enable writes to flash interface (unlock write-protect) */
3988 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3989 goto nvram_write_end;
3991 /* Loop to write back the buffer data from page_start to data_start. */
3994 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3995 /* Erase the page */
3996 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3997 goto nvram_write_end;
3999 /* Re-enable writes for the actual write */
4000 bnx2_enable_nvram_write(bp);
4002 for (addr = page_start; addr < data_start;
4003 addr += 4, i += 4) {
4005 rc = bnx2_nvram_write_dword(bp, addr,
4006 &flash_buffer[i], cmd_flags);
4009 goto nvram_write_end;
4015 /* Loop to write the new data from data_start to data_end */
4016 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4017 if ((addr == page_end - 4) ||
4018 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4019 (addr == data_end - 4))) {
4021 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4023 rc = bnx2_nvram_write_dword(bp, addr, buf,
4027 goto nvram_write_end;
4033 /* Loop to write back the buffer data from data_end to page_end. */
4035 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4036 for (addr = data_end; addr < page_end;
4037 addr += 4, i += 4) {
4039 if (addr == page_end-4) {
4040 cmd_flags = BNX2_NVM_COMMAND_LAST;
4042 rc = bnx2_nvram_write_dword(bp, addr,
4043 &flash_buffer[i], cmd_flags);
4046 goto nvram_write_end;
4052 /* Disable writes to flash interface (lock write-protect) */
4053 bnx2_disable_nvram_write(bp);
4055 /* Disable access to flash interface */
4056 bnx2_disable_nvram_access(bp);
4057 bnx2_release_nvram_lock(bp);
4059 /* Increment written */
4060 written += data_end - data_start;
4064 kfree(flash_buffer);
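/*
 * Illustrative sketch, not part of the driver: the page-window arithmetic
 * used by the write loop above.  Each pass handles only the slice of the
 * request that falls inside one flash page; page_size is a parameter here,
 * the driver takes it from bp->flash_info.
 */
static void demo_nvram_page_window(u32 offset, u32 len, u32 written,
				   u32 page_size,
				   u32 *data_start, u32 *data_end)
{
	u32 page_start = offset + written;
	u32 page_end;

	page_start -= (page_start % page_size);	/* start of this flash page */
	page_end = page_start + page_size;	/* start of the next page   */

	*data_start = (written == 0) ? offset : page_start;
	*data_end = (page_end > offset + len) ? (offset + len) : page_end;
}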
4070 bnx2_init_remote_phy(struct bnx2 *bp)
4074 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4075 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4078 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4079 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4082 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4083 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4085 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4086 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4087 bp->phy_port = PORT_FIBRE;
4089 bp->phy_port = PORT_TP;
4091 if (netif_running(bp->dev)) {
4094 if (val & BNX2_LINK_STATUS_LINK_UP) {
4096 netif_carrier_on(bp->dev);
4099 netif_carrier_off(bp->dev);
4101 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4102 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4103 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4110 bnx2_setup_msix_tbl(struct bnx2 *bp)
4112 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4114 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4115 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4119 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4125 /* Wait for the current PCI transaction to complete before
4126 * issuing a reset. */
4127 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4128 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4129 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4130 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4131 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4132 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4135 /* Wait for the firmware to tell us it is ok to issue a reset. */
4136 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4138 /* Deposit a driver reset signature so the firmware knows that
4139 * this is a soft reset. */
4140 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
4141 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4143 /* Do a dummy read to force the chip to complete all current transactions
4144 * before we issue a reset. */
4145 val = REG_RD(bp, BNX2_MISC_ID);
4147 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4148 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4149 REG_RD(bp, BNX2_MISC_COMMAND);
4152 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4153 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4155 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4158 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4159 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4160 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4163 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4165 /* Reading back any register after chip reset will hang the
4166 * bus on 5706 A0 and A1. The msleep below provides plenty
4167 * of margin for write posting. */
4169 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4170 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4173 /* Reset takes approximately 30 usec. */
4174 for (i = 0; i < 10; i++) {
4175 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4176 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4177 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4182 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4183 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4184 printk(KERN_ERR PFX "Chip reset did not complete\n");
4189 /* Make sure byte swapping is properly configured. */
4190 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4191 if (val != 0x01020304) {
4192 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4196 /* Wait for the firmware to finish its initialization. */
4197 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4201 spin_lock_bh(&bp->phy_lock);
4202 old_port = bp->phy_port;
4203 bnx2_init_remote_phy(bp);
4204 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
4205 bnx2_set_default_remote_link(bp);
4206 spin_unlock_bh(&bp->phy_lock);
4208 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4209 /* Adjust the voltage regulator two steps lower. The default
4210 * of this register is 0x0000000e. */
4211 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4213 /* Remove bad rbuf memory from the free pool. */
4214 rc = bnx2_alloc_bad_rbuf(bp);
4217 if (bp->flags & USING_MSIX_FLAG)
4218 bnx2_setup_msix_tbl(bp);
4224 bnx2_init_chip(struct bnx2 *bp)
4229 /* Make sure the interrupt is not active. */
4230 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4232 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4233 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4235 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4237 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4238 DMA_READ_CHANS << 12 |
4239 DMA_WRITE_CHANS << 16;
4241 val |= (0x2 << 20) | (1 << 11);
4243 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
4246 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4247 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4248 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4250 REG_WR(bp, BNX2_DMA_CONFIG, val);
4252 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4253 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4254 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4255 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4258 if (bp->flags & PCIX_FLAG) {
4261 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4263 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4264 val16 & ~PCI_X_CMD_ERO);
4267 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4268 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4269 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4270 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4272 /* Initialize context mapping and zero out the quick contexts. The
4273 * context block must have already been enabled. */
4274 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4275 rc = bnx2_init_5709_context(bp);
4279 bnx2_init_context(bp);
4281 if ((rc = bnx2_init_cpus(bp)) != 0)
4284 bnx2_init_nvram(bp);
4286 bnx2_set_mac_addr(bp);
4288 val = REG_RD(bp, BNX2_MQ_CONFIG);
4289 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4290 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4291 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4292 val |= BNX2_MQ_CONFIG_HALT_DIS;
4294 REG_WR(bp, BNX2_MQ_CONFIG, val);
4296 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4297 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4298 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4300 val = (BCM_PAGE_BITS - 8) << 24;
4301 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4303 /* Configure page size. */
4304 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4305 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4306 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4307 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4309 val = bp->mac_addr[0] +
4310 (bp->mac_addr[1] << 8) +
4311 (bp->mac_addr[2] << 16) +
4313 (bp->mac_addr[4] << 8) +
4314 (bp->mac_addr[5] << 16);
4315 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4317 /* Program the MTU. Also include 4 bytes for CRC32. */
4318 val = bp->dev->mtu + ETH_HLEN + 4;
4319 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4320 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4321 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4323 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4324 bp->bnx2_napi[i].last_status_idx = 0;
4326 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4328 /* Set up how to generate a link change interrupt. */
4329 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4331 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4332 (u64) bp->status_blk_mapping & 0xffffffff);
4333 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4335 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4336 (u64) bp->stats_blk_mapping & 0xffffffff);
4337 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4338 (u64) bp->stats_blk_mapping >> 32);
4340 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4341 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4343 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4344 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4346 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4347 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4349 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4351 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4353 REG_WR(bp, BNX2_HC_COM_TICKS,
4354 (bp->com_ticks_int << 16) | bp->com_ticks);
4356 REG_WR(bp, BNX2_HC_CMD_TICKS,
4357 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4359 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4360 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4362 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4363 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4365 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4366 val = BNX2_HC_CONFIG_COLLECT_STATS;
4368 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4369 BNX2_HC_CONFIG_COLLECT_STATS;
4372 if (bp->flags & USING_MSIX_FLAG) {
4373 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4374 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4376 REG_WR(bp, BNX2_HC_SB_CONFIG_1,
4377 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4378 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4380 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP_1,
4381 (bp->tx_quick_cons_trip_int << 16) |
4382 bp->tx_quick_cons_trip);
4384 REG_WR(bp, BNX2_HC_TX_TICKS_1,
4385 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4387 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4390 if (bp->flags & ONE_SHOT_MSI_FLAG)
4391 val |= BNX2_HC_CONFIG_ONE_SHOT;
4393 REG_WR(bp, BNX2_HC_CONFIG, val);
4395 /* Clear internal stats counters. */
4396 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4398 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4400 /* Initialize the receive filter. */
4401 bnx2_set_rx_mode(bp->dev);
4403 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4404 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4405 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4406 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4408 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4411 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4412 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4416 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4422 bnx2_clear_ring_states(struct bnx2 *bp)
4424 struct bnx2_napi *bnapi;
4427 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4428 bnapi = &bp->bnx2_napi[i];
4431 bnapi->hw_tx_cons = 0;
4432 bnapi->rx_prod_bseq = 0;
4435 bnapi->rx_pg_prod = 0;
4436 bnapi->rx_pg_cons = 0;
4441 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4443 u32 val, offset0, offset1, offset2, offset3;
4445 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4446 offset0 = BNX2_L2CTX_TYPE_XI;
4447 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4448 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4449 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4451 offset0 = BNX2_L2CTX_TYPE;
4452 offset1 = BNX2_L2CTX_CMD_TYPE;
4453 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4454 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4456 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4457 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4459 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4460 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4462 val = (u64) bp->tx_desc_mapping >> 32;
4463 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4465 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4466 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4470 bnx2_init_tx_ring(struct bnx2 *bp)
4474 struct bnx2_napi *bnapi;
4477 if (bp->flags & USING_MSIX_FLAG) {
4479 bp->tx_vec = BNX2_TX_VEC;
4480 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4483 bnapi = &bp->bnx2_napi[bp->tx_vec];
4485 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4487 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4489 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4490 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4493 bp->tx_prod_bseq = 0;
4495 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4496 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4498 bnx2_init_tx_context(bp, cid);
4502 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4508 for (i = 0; i < num_rings; i++) {
4511 rxbd = &rx_ring[i][0];
4512 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4513 rxbd->rx_bd_len = buf_size;
4514 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4516 if (i == (num_rings - 1))
4520 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4521 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4526 bnx2_init_rx_ring(struct bnx2 *bp)
4529 u16 prod, ring_prod;
4530 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4531 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4533 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4534 bp->rx_buf_use_size, bp->rx_max_ring);
4536 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4537 if (bp->rx_pg_ring_size) {
4538 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4539 bp->rx_pg_desc_mapping,
4540 PAGE_SIZE, bp->rx_max_pg_ring);
4541 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4542 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4543 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4544 BNX2_L2CTX_RBDC_JUMBO_KEY);
4546 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4547 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4549 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4550 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4552 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4553 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4556 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4557 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4559 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4561 val = (u64) bp->rx_desc_mapping[0] >> 32;
4562 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4564 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4565 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4567 ring_prod = prod = bnapi->rx_pg_prod;
4568 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4569 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4571 prod = NEXT_RX_BD(prod);
4572 ring_prod = RX_PG_RING_IDX(prod);
4574 bnapi->rx_pg_prod = prod;
4576 ring_prod = prod = bnapi->rx_prod;
4577 for (i = 0; i < bp->rx_ring_size; i++) {
4578 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4581 prod = NEXT_RX_BD(prod);
4582 ring_prod = RX_RING_IDX(prod);
4584 bnapi->rx_prod = prod;
4586 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4588 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4590 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
4593 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4595 u32 max, num_rings = 1;
4597 while (ring_size > MAX_RX_DESC_CNT) {
4598 ring_size -= MAX_RX_DESC_CNT;
4601 /* round to next power of 2 */
4603 while ((max & num_rings) == 0)
4606 if (num_rings != max)
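/*
 * Illustrative sketch, not part of the driver: a spelled-out version of the
 * ring-count calculation above (parts of which are elided in this listing),
 * assuming max_size is a power of two as the callers' limits are.  The
 * number of descriptor pages is rounded up to the next power of two and
 * capped at max_size; e.g. 700 requested descriptors at 255 usable entries
 * per page need 3 pages, which rounds up to 4.
 */
static u32 demo_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 num_rings = 1;

	while (ring_size > MAX_RX_DESC_CNT) {
		ring_size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round up to the next power of two, capped at max_size */
	while (max_size > num_rings)
		max_size >>= 1;
	if (max_size < num_rings)
		max_size <<= 1;
	return max_size;
}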
4613 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4615 u32 rx_size, rx_space, jumbo_size;
4617 /* 8 for CRC and VLAN */
4618 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4620 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4621 sizeof(struct skb_shared_info);
4623 bp->rx_copy_thresh = RX_COPY_THRESH;
4624 bp->rx_pg_ring_size = 0;
4625 bp->rx_max_pg_ring = 0;
4626 bp->rx_max_pg_ring_idx = 0;
4627 if (rx_space > PAGE_SIZE) {
4628 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4630 jumbo_size = size * pages;
4631 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4632 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4634 bp->rx_pg_ring_size = jumbo_size;
4635 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4637 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4638 rx_size = RX_COPY_THRESH + bp->rx_offset;
4639 bp->rx_copy_thresh = 0;
4642 bp->rx_buf_use_size = rx_size;
4644 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4645 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4646 bp->rx_ring_size = size;
4647 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4648 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
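/*
 * Illustrative sketch, not part of the driver: the sizing decision made
 * above.  When one receive buffer (frame plus headroom plus skb overhead)
 * no longer fits in a single page, the driver switches to a small header
 * buffer plus per-frame page fragments.  skb_overhead is a hypothetical
 * stand-in for sizeof(struct skb_shared_info).
 */
static int demo_needs_paged_rx(unsigned int mtu, unsigned int rx_offset,
			       unsigned int skb_overhead)
{
	unsigned int rx_size = mtu + ETH_HLEN + rx_offset + 8;	/* +8: CRC + VLAN */
	unsigned int rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) +
				NET_SKB_PAD + skb_overhead;

	return rx_space > PAGE_SIZE;	/* true for jumbo MTUs on 4K pages */
}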
4652 bnx2_free_tx_skbs(struct bnx2 *bp)
4656 if (bp->tx_buf_ring == NULL)
4659 for (i = 0; i < TX_DESC_CNT; ) {
4660 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4661 struct sk_buff *skb = tx_buf->skb;
4669 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4670 skb_headlen(skb), PCI_DMA_TODEVICE);
4674 last = skb_shinfo(skb)->nr_frags;
4675 for (j = 0; j < last; j++) {
4676 tx_buf = &bp->tx_buf_ring[i + j + 1];
4677 pci_unmap_page(bp->pdev,
4678 pci_unmap_addr(tx_buf, mapping),
4679 skb_shinfo(skb)->frags[j].size,
4689 bnx2_free_rx_skbs(struct bnx2 *bp)
4693 if (bp->rx_buf_ring == NULL)
4696 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4697 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4698 struct sk_buff *skb = rx_buf->skb;
4703 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4704 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4710 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4711 bnx2_free_rx_page(bp, i);
4715 bnx2_free_skbs(struct bnx2 *bp)
4717 bnx2_free_tx_skbs(bp);
4718 bnx2_free_rx_skbs(bp);
4722 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4726 rc = bnx2_reset_chip(bp, reset_code);
4731 if ((rc = bnx2_init_chip(bp)) != 0)
4734 bnx2_clear_ring_states(bp);
4735 bnx2_init_tx_ring(bp);
4736 bnx2_init_rx_ring(bp);
4741 bnx2_init_nic(struct bnx2 *bp)
4745 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4748 spin_lock_bh(&bp->phy_lock);
4751 spin_unlock_bh(&bp->phy_lock);
4756 bnx2_test_registers(struct bnx2 *bp)
4760 static const struct {
4763 #define BNX2_FL_NOT_5709 1
4767 { 0x006c, 0, 0x00000000, 0x0000003f },
4768 { 0x0090, 0, 0xffffffff, 0x00000000 },
4769 { 0x0094, 0, 0x00000000, 0x00000000 },
4771 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4772 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4773 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4774 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4775 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4776 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4777 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4778 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4779 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4781 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4782 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4783 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4784 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4785 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4786 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4788 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4789 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4790 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4792 { 0x1000, 0, 0x00000000, 0x00000001 },
4793 { 0x1004, 0, 0x00000000, 0x000f0001 },
4795 { 0x1408, 0, 0x01c00800, 0x00000000 },
4796 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4797 { 0x14a8, 0, 0x00000000, 0x000001ff },
4798 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4799 { 0x14b0, 0, 0x00000002, 0x00000001 },
4800 { 0x14b8, 0, 0x00000000, 0x00000000 },
4801 { 0x14c0, 0, 0x00000000, 0x00000009 },
4802 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4803 { 0x14cc, 0, 0x00000000, 0x00000001 },
4804 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4806 { 0x1800, 0, 0x00000000, 0x00000001 },
4807 { 0x1804, 0, 0x00000000, 0x00000003 },
4809 { 0x2800, 0, 0x00000000, 0x00000001 },
4810 { 0x2804, 0, 0x00000000, 0x00003f01 },
4811 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4812 { 0x2810, 0, 0xffff0000, 0x00000000 },
4813 { 0x2814, 0, 0xffff0000, 0x00000000 },
4814 { 0x2818, 0, 0xffff0000, 0x00000000 },
4815 { 0x281c, 0, 0xffff0000, 0x00000000 },
4816 { 0x2834, 0, 0xffffffff, 0x00000000 },
4817 { 0x2840, 0, 0x00000000, 0xffffffff },
4818 { 0x2844, 0, 0x00000000, 0xffffffff },
4819 { 0x2848, 0, 0xffffffff, 0x00000000 },
4820 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4822 { 0x2c00, 0, 0x00000000, 0x00000011 },
4823 { 0x2c04, 0, 0x00000000, 0x00030007 },
4825 { 0x3c00, 0, 0x00000000, 0x00000001 },
4826 { 0x3c04, 0, 0x00000000, 0x00070000 },
4827 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4828 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4829 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4830 { 0x3c14, 0, 0x00000000, 0xffffffff },
4831 { 0x3c18, 0, 0x00000000, 0xffffffff },
4832 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4833 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4835 { 0x5004, 0, 0x00000000, 0x0000007f },
4836 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4838 { 0x5c00, 0, 0x00000000, 0x00000001 },
4839 { 0x5c04, 0, 0x00000000, 0x0003000f },
4840 { 0x5c08, 0, 0x00000003, 0x00000000 },
4841 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4842 { 0x5c10, 0, 0x00000000, 0xffffffff },
4843 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4844 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4845 { 0x5c88, 0, 0x00000000, 0x00077373 },
4846 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4848 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4849 { 0x680c, 0, 0xffffffff, 0x00000000 },
4850 { 0x6810, 0, 0xffffffff, 0x00000000 },
4851 { 0x6814, 0, 0xffffffff, 0x00000000 },
4852 { 0x6818, 0, 0xffffffff, 0x00000000 },
4853 { 0x681c, 0, 0xffffffff, 0x00000000 },
4854 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4855 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4856 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4857 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4858 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4859 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4860 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4861 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4862 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4863 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4864 { 0x684c, 0, 0xffffffff, 0x00000000 },
4865 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4866 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4867 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4868 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4869 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4870 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4872 { 0xffff, 0, 0x00000000, 0x00000000 },
4877 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4880 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4881 u32 offset, rw_mask, ro_mask, save_val, val;
4882 u16 flags = reg_tbl[i].flags;
4884 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4887 offset = (u32) reg_tbl[i].offset;
4888 rw_mask = reg_tbl[i].rw_mask;
4889 ro_mask = reg_tbl[i].ro_mask;
4891 save_val = readl(bp->regview + offset);
4893 writel(0, bp->regview + offset);
4895 val = readl(bp->regview + offset);
4896 if ((val & rw_mask) != 0) {
4900 if ((val & ro_mask) != (save_val & ro_mask)) {
4904 writel(0xffffffff, bp->regview + offset);
4906 val = readl(bp->regview + offset);
4907 if ((val & rw_mask) != rw_mask) {
4911 if ((val & ro_mask) != (save_val & ro_mask)) {
4915 writel(save_val, bp->regview + offset);
4919 writel(save_val, bp->regview + offset);
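/* Exercise a block of on-chip memory through the indirect register
 * interface: write each test pattern in turn, read it back, and fail
 * on the first mismatch.
 */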
4927 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4929 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4930 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
4933 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4936 for (offset = 0; offset < size; offset += 4) {
4938 REG_WR_IND(bp, start + offset, test_pattern[i]);
4940 if (REG_RD_IND(bp, start + offset) !=
4950 bnx2_test_memory(struct bnx2 *bp)
4954 static struct mem_entry {
4957 } mem_tbl_5706[] = {
4958 { 0x60000, 0x4000 },
4959 { 0xa0000, 0x3000 },
4960 { 0xe0000, 0x4000 },
4961 { 0x120000, 0x4000 },
4962 { 0x1a0000, 0x4000 },
4963 { 0x160000, 0x4000 },
4967 { 0x60000, 0x4000 },
4968 { 0xa0000, 0x3000 },
4969 { 0xe0000, 0x4000 },
4970 { 0x120000, 0x4000 },
4971 { 0x1a0000, 0x4000 },
4974 struct mem_entry *mem_tbl;
4976 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4977 mem_tbl = mem_tbl_5709;
4979 mem_tbl = mem_tbl_5706;
4981 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4982 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4983 mem_tbl[i].len)) != 0) {
4991 #define BNX2_MAC_LOOPBACK 0
4992 #define BNX2_PHY_LOOPBACK 1
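/* Loopback self-test helper: put the MAC or PHY in loopback mode,
 * transmit a single frame filled with a known byte pattern, and
 * verify that the same frame arrives intact on the receive ring.
 */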
4995 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4997 unsigned int pkt_size, num_pkts, i;
4998 struct sk_buff *skb, *rx_skb;
4999 unsigned char *packet;
5000 u16 rx_start_idx, rx_idx;
5003 struct sw_bd *rx_buf;
5004 struct l2_fhdr *rx_hdr;
5006 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5009 if (bp->flags & USING_MSIX_FLAG)
5010 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5012 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5013 bp->loopback = MAC_LOOPBACK;
5014 bnx2_set_mac_loopback(bp);
5016 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5017 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5020 bp->loopback = PHY_LOOPBACK;
5021 bnx2_set_phy_loopback(bp);
5026 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5027 skb = netdev_alloc_skb(bp->dev, pkt_size);
5030 packet = skb_put(skb, pkt_size);
5031 memcpy(packet, bp->dev->dev_addr, 6);
5032 memset(packet + 6, 0x0, 8);
5033 for (i = 14; i < pkt_size; i++)
5034 packet[i] = (unsigned char) (i & 0xff);
5036 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5039 REG_WR(bp, BNX2_HC_COMMAND,
5040 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5042 REG_RD(bp, BNX2_HC_COMMAND);
5045 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5049 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5051 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5052 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5053 txbd->tx_bd_mss_nbytes = pkt_size;
5054 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5057 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5058 bp->tx_prod_bseq += pkt_size;
5060 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
5061 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5065 REG_WR(bp, BNX2_HC_COMMAND,
5066 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5068 REG_RD(bp, BNX2_HC_COMMAND);
5072 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5075 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5076 goto loopback_test_done;
5078 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5079 if (rx_idx != rx_start_idx + num_pkts) {
5080 goto loopback_test_done;
5083 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5084 rx_skb = rx_buf->skb;
5086 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5087 skb_reserve(rx_skb, bp->rx_offset);
5089 pci_dma_sync_single_for_cpu(bp->pdev,
5090 pci_unmap_addr(rx_buf, mapping),
5091 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5093 if (rx_hdr->l2_fhdr_status &
5094 (L2_FHDR_ERRORS_BAD_CRC |
5095 L2_FHDR_ERRORS_PHY_DECODE |
5096 L2_FHDR_ERRORS_ALIGNMENT |
5097 L2_FHDR_ERRORS_TOO_SHORT |
5098 L2_FHDR_ERRORS_GIANT_FRAME)) {
5100 goto loopback_test_done;
5103 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5104 goto loopback_test_done;
5107 for (i = 14; i < pkt_size; i++) {
5108 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5109 goto loopback_test_done;
5120 #define BNX2_MAC_LOOPBACK_FAILED 1
5121 #define BNX2_PHY_LOOPBACK_FAILED 2
5122 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5123 BNX2_PHY_LOOPBACK_FAILED)
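/* Reset the NIC and run both loopback modes; returns a bitmask of
 * the modes that failed, or 0 on success.
 */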
5126 bnx2_test_loopback(struct bnx2 *bp)
5130 if (!netif_running(bp->dev))
5131 return BNX2_LOOPBACK_FAILED;
5133 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5134 spin_lock_bh(&bp->phy_lock);
5136 spin_unlock_bh(&bp->phy_lock);
5137 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5138 rc |= BNX2_MAC_LOOPBACK_FAILED;
5139 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5140 rc |= BNX2_PHY_LOOPBACK_FAILED;
5144 #define NVRAM_SIZE 0x200
5145 #define CRC32_RESIDUAL 0xdebb20e3
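/* NVRAM self-test: verify the magic value in the first word, then
 * check the CRC32 residual over two 256-byte blocks read starting at
 * offset 0x100.
 */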
5148 bnx2_test_nvram(struct bnx2 *bp)
5150 u32 buf[NVRAM_SIZE / 4];
5151 u8 *data = (u8 *) buf;
5155 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5156 goto test_nvram_done;
5158 magic = be32_to_cpu(buf[0]);
5159 if (magic != 0x669955aa) {
5161 goto test_nvram_done;
5164 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5165 goto test_nvram_done;
5167 csum = ether_crc_le(0x100, data);
5168 if (csum != CRC32_RESIDUAL) {
5170 goto test_nvram_done;
5173 csum = ether_crc_le(0x100, data + 0x100);
5174 if (csum != CRC32_RESIDUAL) {
5183 bnx2_test_link(struct bnx2 *bp)
5187 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5192 spin_lock_bh(&bp->phy_lock);
5193 bnx2_enable_bmsr1(bp);
5194 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5195 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5196 bnx2_disable_bmsr1(bp);
5197 spin_unlock_bh(&bp->phy_lock);
5199 if (bmsr & BMSR_LSTATUS) {
5206 bnx2_test_intr(struct bnx2 *bp)
5211 if (!netif_running(bp->dev))
5214 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5216 /* This register is not touched during run-time. */
5217 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5218 REG_RD(bp, BNX2_HC_COMMAND);
5220 for (i = 0; i < 10; i++) {
5221 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5227 msleep_interruptible(10);
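/* 5706 SerDes work-around, run from the periodic timer: when autoneg
 * is not completing, force 1Gb full duplex once a signal is detected
 * without incoming configs (parallel detect), and re-enable autoneg
 * when the link partner starts sending configs again.
 */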
5236 bnx2_5706_serdes_timer(struct bnx2 *bp)
5238 spin_lock(&bp->phy_lock);
5239 if (bp->serdes_an_pending)
5240 bp->serdes_an_pending--;
5241 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5244 bp->current_interval = bp->timer_interval;
5246 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5248 if (bmcr & BMCR_ANENABLE) {
5251 bnx2_write_phy(bp, 0x1c, 0x7c00);
5252 bnx2_read_phy(bp, 0x1c, &phy1);
5254 bnx2_write_phy(bp, 0x17, 0x0f01);
5255 bnx2_read_phy(bp, 0x15, &phy2);
5256 bnx2_write_phy(bp, 0x17, 0x0f01);
5257 bnx2_read_phy(bp, 0x15, &phy2);
5259 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5260 !(phy2 & 0x20)) { /* no CONFIG */
5262 bmcr &= ~BMCR_ANENABLE;
5263 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5264 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5265 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5269 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5270 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5273 bnx2_write_phy(bp, 0x17, 0x0f01);
5274 bnx2_read_phy(bp, 0x15, &phy2);
5278 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5279 bmcr |= BMCR_ANENABLE;
5280 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5282 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5285 bp->current_interval = bp->timer_interval;
5287 spin_unlock(&bp->phy_lock);
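/* 5708 SerDes work-around: on 2.5Gb-capable ports, alternate between
 * forced 2.5Gb and autoneg while the link stays down.
 */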
5291 bnx2_5708_serdes_timer(struct bnx2 *bp)
5293 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5296 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5297 bp->serdes_an_pending = 0;
5301 spin_lock(&bp->phy_lock);
5302 if (bp->serdes_an_pending)
5303 bp->serdes_an_pending--;
5304 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5307 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5308 if (bmcr & BMCR_ANENABLE) {
5309 bnx2_enable_forced_2g5(bp);
5310 bp->current_interval = SERDES_FORCED_TIMEOUT;
5312 bnx2_disable_forced_2g5(bp);
5313 bp->serdes_an_pending = 2;
5314 bp->current_interval = bp->timer_interval;
5318 bp->current_interval = bp->timer_interval;
5320 spin_unlock(&bp->phy_lock);
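/* Periodic driver timer: send the heartbeat to the firmware, refresh
 * the firmware RX drop counter, kick the statistics block on 5708 as
 * a work-around for corrupted counters, run the SerDes timers where
 * needed, and re-arm itself.
 */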
5324 bnx2_timer(unsigned long data)
5326 struct bnx2 *bp = (struct bnx2 *) data;
5328 if (!netif_running(bp->dev))
5331 if (atomic_read(&bp->intr_sem) != 0)
5332 goto bnx2_restart_timer;
5334 bnx2_send_heart_beat(bp);
5336 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5338 /* workaround occasional corrupted counters */
5339 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5340 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5341 BNX2_HC_COMMAND_STATS_NOW);
5343 if (bp->phy_flags & PHY_SERDES_FLAG) {
5344 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5345 bnx2_5706_serdes_timer(bp);
5347 bnx2_5708_serdes_timer(bp);
5351 mod_timer(&bp->timer, jiffies + bp->current_interval);
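/* Request one IRQ per vector listed in irq_tbl; legacy INTx
 * interrupts are requested as shared.
 */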
5355 bnx2_request_irq(struct bnx2 *bp)
5357 struct net_device *dev = bp->dev;
5358 unsigned long flags;
5359 struct bnx2_irq *irq;
5362 if (bp->flags & USING_MSI_OR_MSIX_FLAG)
5365 flags = IRQF_SHARED;
5367 for (i = 0; i < bp->irq_nvecs; i++) {
5368 irq = &bp->irq_tbl[i];
5369 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5379 bnx2_free_irq(struct bnx2 *bp)
5381 struct net_device *dev = bp->dev;
5382 struct bnx2_irq *irq;
5385 for (i = 0; i < bp->irq_nvecs; i++) {
5386 irq = &bp->irq_tbl[i];
5388 free_irq(irq->vector, dev);
5391 if (bp->flags & USING_MSI_FLAG)
5392 pci_disable_msi(bp->pdev);
5393 else if (bp->flags & USING_MSIX_FLAG)
5394 pci_disable_msix(bp->pdev);
5396 bp->flags &= ~(USING_MSI_OR_MSIX_FLAG | ONE_SHOT_MSI_FLAG);
5400 bnx2_enable_msix(struct bnx2 *bp)
5402 bnx2_setup_msix_tbl(bp);
5403 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5404 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5405 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
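/* Choose the interrupt mode: default to legacy INTx, then switch to
 * MSI-X or MSI when the chip supports it and the user has not
 * disabled it (one-shot MSI on the 5709), filling in irq_tbl
 * accordingly.
 */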
5409 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5411 bp->irq_tbl[0].handler = bnx2_interrupt;
5412 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5414 bp->irq_tbl[0].vector = bp->pdev->irq;
5416 if ((bp->flags & MSIX_CAP_FLAG) && !dis_msi)
5417 bnx2_enable_msix(bp);
5419 if ((bp->flags & MSI_CAP_FLAG) && !dis_msi &&
5420 !(bp->flags & USING_MSIX_FLAG)) {
5421 if (pci_enable_msi(bp->pdev) == 0) {
5422 bp->flags |= USING_MSI_FLAG;
5423 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5424 bp->flags |= ONE_SHOT_MSI_FLAG;
5425 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5427 bp->irq_tbl[0].handler = bnx2_msi;
5429 bp->irq_tbl[0].vector = bp->pdev->irq;
5434 /* Called with rtnl_lock */
5436 bnx2_open(struct net_device *dev)
5438 struct bnx2 *bp = netdev_priv(dev);
5441 netif_carrier_off(dev);
5443 bnx2_set_power_state(bp, PCI_D0);
5444 bnx2_disable_int(bp);
5446 rc = bnx2_alloc_mem(bp);
5450 bnx2_setup_int_mode(bp, disable_msi);
5451 bnx2_napi_enable(bp);
5452 rc = bnx2_request_irq(bp);
5455 bnx2_napi_disable(bp);
5460 rc = bnx2_init_nic(bp);
5463 bnx2_napi_disable(bp);
5470 mod_timer(&bp->timer, jiffies + bp->current_interval);
5472 atomic_set(&bp->intr_sem, 0);
5474 bnx2_enable_int(bp);
5476 if (bp->flags & USING_MSI_FLAG) {
5477 /* Test MSI to make sure it is working
5478 * If MSI test fails, go back to INTx mode
5480 if (bnx2_test_intr(bp) != 0) {
5481 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5482 " using MSI, switching to INTx mode. Please"
5483 " report this failure to the PCI maintainer"
5484 " and include system chipset information.\n",
5487 bnx2_disable_int(bp);
5490 bnx2_setup_int_mode(bp, 1);
5492 rc = bnx2_init_nic(bp);
5495 rc = bnx2_request_irq(bp);
5498 bnx2_napi_disable(bp);
5501 del_timer_sync(&bp->timer);
5504 bnx2_enable_int(bp);
5507 if (bp->flags & USING_MSI_FLAG) {
5508 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5511 netif_start_queue(dev);
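/* Deferred reset handler, scheduled from the TX timeout path: stop
 * the interface, re-initialize the NIC, and start it again.
 */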
5517 bnx2_reset_task(struct work_struct *work)
5519 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5521 if (!netif_running(bp->dev))
5524 bp->in_reset_task = 1;
5525 bnx2_netif_stop(bp);
5529 atomic_set(&bp->intr_sem, 1);
5530 bnx2_netif_start(bp);
5531 bp->in_reset_task = 0;
5535 bnx2_tx_timeout(struct net_device *dev)
5537 struct bnx2 *bp = netdev_priv(dev);
5539 /* This allows the netif to be shut down gracefully before resetting */
5540 schedule_work(&bp->reset_task);
5544 /* Called with rtnl_lock */
5546 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5548 struct bnx2 *bp = netdev_priv(dev);
5550 bnx2_netif_stop(bp);
5553 bnx2_set_rx_mode(dev);
5555 bnx2_netif_start(bp);
5559 /* Called with netif_tx_lock.
5560 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5561 * netif_wake_queue().
5564 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5566 struct bnx2 *bp = netdev_priv(dev);
5569 struct sw_bd *tx_buf;
5570 u32 len, vlan_tag_flags, last_frag, mss;
5571 u16 prod, ring_prod;
5573 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
5575 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5576 (skb_shinfo(skb)->nr_frags + 1))) {
5577 netif_stop_queue(dev);
5578 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5581 return NETDEV_TX_BUSY;
5583 len = skb_headlen(skb);
5585 ring_prod = TX_RING_IDX(prod);
5588 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5589 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5592 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5594 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5596 if ((mss = skb_shinfo(skb)->gso_size)) {
5597 u32 tcp_opt_len, ip_tcp_len;
5600 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5602 tcp_opt_len = tcp_optlen(skb);
5604 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5605 u32 tcp_off = skb_transport_offset(skb) -
5606 sizeof(struct ipv6hdr) - ETH_HLEN;
5608 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5609 TX_BD_FLAGS_SW_FLAGS;
5610 if (likely(tcp_off == 0))
5611 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5614 vlan_tag_flags |= ((tcp_off & 0x3) <<
5615 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5616 ((tcp_off & 0x10) <<
5617 TX_BD_FLAGS_TCP6_OFF4_SHL);
5618 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5621 if (skb_header_cloned(skb) &&
5622 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5624 return NETDEV_TX_OK;
5627 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5631 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5632 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5636 if (tcp_opt_len || (iph->ihl > 5)) {
5637 vlan_tag_flags |= ((iph->ihl - 5) +
5638 (tcp_opt_len >> 2)) << 8;
5644 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5646 tx_buf = &bp->tx_buf_ring[ring_prod];
5648 pci_unmap_addr_set(tx_buf, mapping, mapping);
5650 txbd = &bp->tx_desc_ring[ring_prod];
5652 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5653 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5654 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5655 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5657 last_frag = skb_shinfo(skb)->nr_frags;
5659 for (i = 0; i < last_frag; i++) {
5660 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5662 prod = NEXT_TX_BD(prod);
5663 ring_prod = TX_RING_IDX(prod);
5664 txbd = &bp->tx_desc_ring[ring_prod];
5667 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5668 len, PCI_DMA_TODEVICE);
5669 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5672 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5673 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5674 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5675 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5678 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5680 prod = NEXT_TX_BD(prod);
5681 bp->tx_prod_bseq += skb->len;
5683 REG_WR16(bp, bp->tx_bidx_addr, prod);
5684 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5689 dev->trans_start = jiffies;
5691 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5692 netif_stop_queue(dev);
5693 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
5694 netif_wake_queue(dev);
5697 return NETDEV_TX_OK;
5700 /* Called with rtnl_lock */
5702 bnx2_close(struct net_device *dev)
5704 struct bnx2 *bp = netdev_priv(dev);
5707 /* Calling flush_scheduled_work() may deadlock because
5708 * linkwatch_event() may be on the workqueue and it will try to get
5709 * the rtnl_lock which we are holding.
5711 while (bp->in_reset_task)
5714 bnx2_disable_int_sync(bp);
5715 bnx2_napi_disable(bp);
5716 del_timer_sync(&bp->timer);
5717 if (bp->flags & NO_WOL_FLAG)
5718 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5720 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5722 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5723 bnx2_reset_chip(bp, reset_code);
5728 netif_carrier_off(bp->dev);
5729 bnx2_set_power_state(bp, PCI_D3hot);
5733 #define GET_NET_STATS64(ctr) \
5734 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5735 (unsigned long) (ctr##_lo)
5737 #define GET_NET_STATS32(ctr) \
5740 #if (BITS_PER_LONG == 64)
5741 #define GET_NET_STATS GET_NET_STATS64
5743 #define GET_NET_STATS GET_NET_STATS32
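/* Fold the hardware statistics block into the standard
 * net_device_stats counters.
 */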
5746 static struct net_device_stats *
5747 bnx2_get_stats(struct net_device *dev)
5749 struct bnx2 *bp = netdev_priv(dev);
5750 struct statistics_block *stats_blk = bp->stats_blk;
5751 struct net_device_stats *net_stats = &bp->net_stats;
5753 if (bp->stats_blk == NULL) {
5756 net_stats->rx_packets =
5757 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5758 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5759 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5761 net_stats->tx_packets =
5762 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5763 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5764 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5766 net_stats->rx_bytes =
5767 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5769 net_stats->tx_bytes =
5770 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5772 net_stats->multicast =
5773 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5775 net_stats->collisions =
5776 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5778 net_stats->rx_length_errors =
5779 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5780 stats_blk->stat_EtherStatsOverrsizePkts);
5782 net_stats->rx_over_errors =
5783 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5785 net_stats->rx_frame_errors =
5786 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5788 net_stats->rx_crc_errors =
5789 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5791 net_stats->rx_errors = net_stats->rx_length_errors +
5792 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5793 net_stats->rx_crc_errors;
5795 net_stats->tx_aborted_errors =
5796 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5797 stats_blk->stat_Dot3StatsLateCollisions);
5799 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5800 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5801 net_stats->tx_carrier_errors = 0;
5803 net_stats->tx_carrier_errors =
5805 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5808 net_stats->tx_errors =
5810 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5812 net_stats->tx_aborted_errors +
5813 net_stats->tx_carrier_errors;
5815 net_stats->rx_missed_errors =
5816 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5817 stats_blk->stat_FwRxDrop);
5822 /* All ethtool functions called with rtnl_lock */
5825 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5827 struct bnx2 *bp = netdev_priv(dev);
5828 int support_serdes = 0, support_copper = 0;
5830 cmd->supported = SUPPORTED_Autoneg;
5831 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5834 } else if (bp->phy_port == PORT_FIBRE)
5839 if (support_serdes) {
5840 cmd->supported |= SUPPORTED_1000baseT_Full |
5842 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5843 cmd->supported |= SUPPORTED_2500baseX_Full;
5846 if (support_copper) {
5847 cmd->supported |= SUPPORTED_10baseT_Half |
5848 SUPPORTED_10baseT_Full |
5849 SUPPORTED_100baseT_Half |
5850 SUPPORTED_100baseT_Full |
5851 SUPPORTED_1000baseT_Full |
5856 spin_lock_bh(&bp->phy_lock);
5857 cmd->port = bp->phy_port;
5858 cmd->advertising = bp->advertising;
5860 if (bp->autoneg & AUTONEG_SPEED) {
5861 cmd->autoneg = AUTONEG_ENABLE;
5864 cmd->autoneg = AUTONEG_DISABLE;
5867 if (netif_carrier_ok(dev)) {
5868 cmd->speed = bp->line_speed;
5869 cmd->duplex = bp->duplex;
5875 spin_unlock_bh(&bp->phy_lock);
5877 cmd->transceiver = XCVR_INTERNAL;
5878 cmd->phy_address = bp->phy_addr;
5884 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5886 struct bnx2 *bp = netdev_priv(dev);
5887 u8 autoneg = bp->autoneg;
5888 u8 req_duplex = bp->req_duplex;
5889 u16 req_line_speed = bp->req_line_speed;
5890 u32 advertising = bp->advertising;
5893 spin_lock_bh(&bp->phy_lock);
5895 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5896 goto err_out_unlock;
5898 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5899 goto err_out_unlock;
5901 if (cmd->autoneg == AUTONEG_ENABLE) {
5902 autoneg |= AUTONEG_SPEED;
5904 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5906 /* allow advertising 1 speed */
5907 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5908 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5909 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5910 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5912 if (cmd->port == PORT_FIBRE)
5913 goto err_out_unlock;
5915 advertising = cmd->advertising;
5917 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5918 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5919 (cmd->port == PORT_TP))
5920 goto err_out_unlock;
5921 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5922 advertising = cmd->advertising;
5923 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5924 goto err_out_unlock;
5926 if (cmd->port == PORT_FIBRE)
5927 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5929 advertising = ETHTOOL_ALL_COPPER_SPEED;
5931 advertising |= ADVERTISED_Autoneg;
5934 if (cmd->port == PORT_FIBRE) {
5935 if ((cmd->speed != SPEED_1000 &&
5936 cmd->speed != SPEED_2500) ||
5937 (cmd->duplex != DUPLEX_FULL))
5938 goto err_out_unlock;
5940 if (cmd->speed == SPEED_2500 &&
5941 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5942 goto err_out_unlock;
5944 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5945 goto err_out_unlock;
5947 autoneg &= ~AUTONEG_SPEED;
5948 req_line_speed = cmd->speed;
5949 req_duplex = cmd->duplex;
5953 bp->autoneg = autoneg;
5954 bp->advertising = advertising;
5955 bp->req_line_speed = req_line_speed;
5956 bp->req_duplex = req_duplex;
5958 err = bnx2_setup_phy(bp, cmd->port);
5961 spin_unlock_bh(&bp->phy_lock);
5967 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5969 struct bnx2 *bp = netdev_priv(dev);
5971 strcpy(info->driver, DRV_MODULE_NAME);
5972 strcpy(info->version, DRV_MODULE_VERSION);
5973 strcpy(info->bus_info, pci_name(bp->pdev));
5974 strcpy(info->fw_version, bp->fw_version);
5977 #define BNX2_REGDUMP_LEN (32 * 1024)
5980 bnx2_get_regs_len(struct net_device *dev)
5982 return BNX2_REGDUMP_LEN;
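/* ethtool register dump: copy the register ranges listed in
 * reg_boundaries (start/end pairs) into a zeroed 32KB buffer, leaving
 * the skipped ranges as zeros.
 */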
5986 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5988 u32 *p = _p, i, offset;
5990 struct bnx2 *bp = netdev_priv(dev);
5991 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5992 0x0800, 0x0880, 0x0c00, 0x0c10,
5993 0x0c30, 0x0d08, 0x1000, 0x101c,
5994 0x1040, 0x1048, 0x1080, 0x10a4,
5995 0x1400, 0x1490, 0x1498, 0x14f0,
5996 0x1500, 0x155c, 0x1580, 0x15dc,
5997 0x1600, 0x1658, 0x1680, 0x16d8,
5998 0x1800, 0x1820, 0x1840, 0x1854,
5999 0x1880, 0x1894, 0x1900, 0x1984,
6000 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6001 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6002 0x2000, 0x2030, 0x23c0, 0x2400,
6003 0x2800, 0x2820, 0x2830, 0x2850,
6004 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6005 0x3c00, 0x3c94, 0x4000, 0x4010,
6006 0x4080, 0x4090, 0x43c0, 0x4458,
6007 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6008 0x4fc0, 0x5010, 0x53c0, 0x5444,
6009 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6010 0x5fc0, 0x6000, 0x6400, 0x6428,
6011 0x6800, 0x6848, 0x684c, 0x6860,
6012 0x6888, 0x6910, 0x8000 };
6016 memset(p, 0, BNX2_REGDUMP_LEN);
6018 if (!netif_running(bp->dev))
6022 offset = reg_boundaries[0];
6024 while (offset < BNX2_REGDUMP_LEN) {
6025 *p++ = REG_RD(bp, offset);
6027 if (offset == reg_boundaries[i + 1]) {
6028 offset = reg_boundaries[i + 2];
6029 p = (u32 *) (orig_p + offset);
6036 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6038 struct bnx2 *bp = netdev_priv(dev);
6040 if (bp->flags & NO_WOL_FLAG) {
6045 wol->supported = WAKE_MAGIC;
6047 wol->wolopts = WAKE_MAGIC;
6051 memset(&wol->sopass, 0, sizeof(wol->sopass));
6055 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6057 struct bnx2 *bp = netdev_priv(dev);
6059 if (wol->wolopts & ~WAKE_MAGIC)
6062 if (wol->wolopts & WAKE_MAGIC) {
6063 if (bp->flags & NO_WOL_FLAG)
6075 bnx2_nway_reset(struct net_device *dev)
6077 struct bnx2 *bp = netdev_priv(dev);
6080 if (!(bp->autoneg & AUTONEG_SPEED)) {
6084 spin_lock_bh(&bp->phy_lock);
6086 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
6089 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6090 spin_unlock_bh(&bp->phy_lock);
6094 /* Force a link down visible on the other side */
6095 if (bp->phy_flags & PHY_SERDES_FLAG) {
6096 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6097 spin_unlock_bh(&bp->phy_lock);
6101 spin_lock_bh(&bp->phy_lock);
6103 bp->current_interval = SERDES_AN_TIMEOUT;
6104 bp->serdes_an_pending = 1;
6105 mod_timer(&bp->timer, jiffies + bp->current_interval);
6108 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6109 bmcr &= ~BMCR_LOOPBACK;
6110 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6112 spin_unlock_bh(&bp->phy_lock);
6118 bnx2_get_eeprom_len(struct net_device *dev)
6120 struct bnx2 *bp = netdev_priv(dev);
6122 if (bp->flash_info == NULL)
6125 return (int) bp->flash_size;
6129 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6132 struct bnx2 *bp = netdev_priv(dev);
6135 /* parameters already validated in ethtool_get_eeprom */
6137 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6143 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6146 struct bnx2 *bp = netdev_priv(dev);
6149 /* parameters already validated in ethtool_set_eeprom */
6151 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6157 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6159 struct bnx2 *bp = netdev_priv(dev);
6161 memset(coal, 0, sizeof(struct ethtool_coalesce));
6163 coal->rx_coalesce_usecs = bp->rx_ticks;
6164 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6165 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6166 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6168 coal->tx_coalesce_usecs = bp->tx_ticks;
6169 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6170 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6171 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6173 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6179 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6181 struct bnx2 *bp = netdev_priv(dev);
6183 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6184 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6186 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6187 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6189 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6190 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6192 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6193 if (bp->rx_quick_cons_trip_int > 0xff)
6194 bp->rx_quick_cons_trip_int = 0xff;
6196 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6197 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6199 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6200 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6202 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6203 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6205 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6206 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = 0xff;
6209 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6210 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6211 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6212 bp->stats_ticks = USEC_PER_SEC;
6214 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6215 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6216 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6218 if (netif_running(bp->dev)) {
6219 bnx2_netif_stop(bp);
6221 bnx2_netif_start(bp);
6228 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6230 struct bnx2 *bp = netdev_priv(dev);
6232 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6233 ering->rx_mini_max_pending = 0;
6234 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6236 ering->rx_pending = bp->rx_ring_size;
6237 ering->rx_mini_pending = 0;
6238 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6240 ering->tx_max_pending = MAX_TX_DESC_CNT;
6241 ering->tx_pending = bp->tx_ring_size;
6245 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6247 if (netif_running(bp->dev)) {
6248 bnx2_netif_stop(bp);
6249 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6254 bnx2_set_rx_ring_size(bp, rx);
6255 bp->tx_ring_size = tx;
6257 if (netif_running(bp->dev)) {
6260 rc = bnx2_alloc_mem(bp);
6264 bnx2_netif_start(bp);
6270 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6272 struct bnx2 *bp = netdev_priv(dev);
6275 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6276 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6277 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6281 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6286 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6288 struct bnx2 *bp = netdev_priv(dev);
6290 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6291 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6292 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6296 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6298 struct bnx2 *bp = netdev_priv(dev);
6300 bp->req_flow_ctrl = 0;
6301 if (epause->rx_pause)
6302 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6303 if (epause->tx_pause)
6304 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6306 if (epause->autoneg) {
6307 bp->autoneg |= AUTONEG_FLOW_CTRL;
6310 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6313 spin_lock_bh(&bp->phy_lock);
6315 bnx2_setup_phy(bp, bp->phy_port);
6317 spin_unlock_bh(&bp->phy_lock);
6323 bnx2_get_rx_csum(struct net_device *dev)
6325 struct bnx2 *bp = netdev_priv(dev);
6331 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6333 struct bnx2 *bp = netdev_priv(dev);
6340 bnx2_set_tso(struct net_device *dev, u32 data)
6342 struct bnx2 *bp = netdev_priv(dev);
6345 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6346 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6347 dev->features |= NETIF_F_TSO6;
6349 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6354 #define BNX2_NUM_STATS 46
6357 char string[ETH_GSTRING_LEN];
6358 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6360 { "rx_error_bytes" },
6362 { "tx_error_bytes" },
6363 { "rx_ucast_packets" },
6364 { "rx_mcast_packets" },
6365 { "rx_bcast_packets" },
6366 { "tx_ucast_packets" },
6367 { "tx_mcast_packets" },
6368 { "tx_bcast_packets" },
6369 { "tx_mac_errors" },
6370 { "tx_carrier_errors" },
6371 { "rx_crc_errors" },
6372 { "rx_align_errors" },
6373 { "tx_single_collisions" },
6374 { "tx_multi_collisions" },
6376 { "tx_excess_collisions" },
6377 { "tx_late_collisions" },
6378 { "tx_total_collisions" },
6381 { "rx_undersize_packets" },
6382 { "rx_oversize_packets" },
6383 { "rx_64_byte_packets" },
6384 { "rx_65_to_127_byte_packets" },
6385 { "rx_128_to_255_byte_packets" },
6386 { "rx_256_to_511_byte_packets" },
6387 { "rx_512_to_1023_byte_packets" },
6388 { "rx_1024_to_1522_byte_packets" },
6389 { "rx_1523_to_9022_byte_packets" },
6390 { "tx_64_byte_packets" },
6391 { "tx_65_to_127_byte_packets" },
6392 { "tx_128_to_255_byte_packets" },
6393 { "tx_256_to_511_byte_packets" },
6394 { "tx_512_to_1023_byte_packets" },
6395 { "tx_1024_to_1522_byte_packets" },
6396 { "tx_1523_to_9022_byte_packets" },
6397 { "rx_xon_frames" },
6398 { "rx_xoff_frames" },
6399 { "tx_xon_frames" },
6400 { "tx_xoff_frames" },
6401 { "rx_mac_ctrl_frames" },
6402 { "rx_filtered_packets" },
6404 { "rx_fw_discards" },
6407 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6409 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6410 STATS_OFFSET32(stat_IfHCInOctets_hi),
6411 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6412 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6413 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6414 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6415 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6416 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6417 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6418 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6419 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6420 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6421 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6422 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6423 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6424 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6425 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6426 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6427 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6428 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6429 STATS_OFFSET32(stat_EtherStatsCollisions),
6430 STATS_OFFSET32(stat_EtherStatsFragments),
6431 STATS_OFFSET32(stat_EtherStatsJabbers),
6432 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6433 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6434 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6435 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6436 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6437 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6438 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6439 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6440 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6441 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6442 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6443 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6444 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6445 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6446 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6447 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6448 STATS_OFFSET32(stat_XonPauseFramesReceived),
6449 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6450 STATS_OFFSET32(stat_OutXonSent),
6451 STATS_OFFSET32(stat_OutXoffSent),
6452 STATS_OFFSET32(stat_MacControlFramesReceived),
6453 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6454 STATS_OFFSET32(stat_IfInMBUFDiscards),
6455 STATS_OFFSET32(stat_FwRxDrop),
6458 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6459 * skipped because of errata.
6461 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6462 8,0,8,8,8,8,8,8,8,8,
6463 4,0,4,4,4,4,4,4,4,4,
6464 4,4,4,4,4,4,4,4,4,4,
6465 4,4,4,4,4,4,4,4,4,4,
6469 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6470 8,0,8,8,8,8,8,8,8,8,
6471 4,4,4,4,4,4,4,4,4,4,
6472 4,4,4,4,4,4,4,4,4,4,
6473 4,4,4,4,4,4,4,4,4,4,
6477 #define BNX2_NUM_TESTS 6
6480 char string[ETH_GSTRING_LEN];
6481 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6482 { "register_test (offline)" },
6483 { "memory_test (offline)" },
6484 { "loopback_test (offline)" },
6485 { "nvram_test (online)" },
6486 { "interrupt_test (online)" },
6487 { "link_test (online)" },
6491 bnx2_get_sset_count(struct net_device *dev, int sset)
6495 return BNX2_NUM_TESTS;
6497 return BNX2_NUM_STATS;
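/* ethtool self-test: the offline tests (registers, memory, loopback)
 * need a chip reset with the interface stopped; the online tests
 * (NVRAM, interrupt, link) run against the live device.
 */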
6504 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6506 struct bnx2 *bp = netdev_priv(dev);
6508 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6509 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6512 bnx2_netif_stop(bp);
6513 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6516 if (bnx2_test_registers(bp) != 0) {
6518 etest->flags |= ETH_TEST_FL_FAILED;
6520 if (bnx2_test_memory(bp) != 0) {
6522 etest->flags |= ETH_TEST_FL_FAILED;
6524 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6525 etest->flags |= ETH_TEST_FL_FAILED;
6527 if (!netif_running(bp->dev)) {
6528 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6532 bnx2_netif_start(bp);
6535 /* wait for link up */
6536 for (i = 0; i < 7; i++) {
6539 msleep_interruptible(1000);
6543 if (bnx2_test_nvram(bp) != 0) {
6545 etest->flags |= ETH_TEST_FL_FAILED;
6547 if (bnx2_test_intr(bp) != 0) {
6549 etest->flags |= ETH_TEST_FL_FAILED;
6552 if (bnx2_test_link(bp) != 0) {
6554 etest->flags |= ETH_TEST_FL_FAILED;
6560 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6562 switch (stringset) {
6564 memcpy(buf, bnx2_stats_str_arr,
6565 sizeof(bnx2_stats_str_arr));
6568 memcpy(buf, bnx2_tests_str_arr,
6569 sizeof(bnx2_tests_str_arr));
6575 bnx2_get_ethtool_stats(struct net_device *dev,
6576 struct ethtool_stats *stats, u64 *buf)
6578 struct bnx2 *bp = netdev_priv(dev);
6580 u32 *hw_stats = (u32 *) bp->stats_blk;
6581 u8 *stats_len_arr = NULL;
6583 if (hw_stats == NULL) {
6584 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6588 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6589 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6590 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6591 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6592 stats_len_arr = bnx2_5706_stats_len_arr;
6594 stats_len_arr = bnx2_5708_stats_len_arr;
6596 for (i = 0; i < BNX2_NUM_STATS; i++) {
6597 if (stats_len_arr[i] == 0) {
6598 /* skip this counter */
6602 if (stats_len_arr[i] == 4) {
6603 /* 4-byte counter */
6605 *(hw_stats + bnx2_stats_offset_arr[i]);
6608 /* 8-byte counter */
6609 buf[i] = (((u64) *(hw_stats +
6610 bnx2_stats_offset_arr[i])) << 32) +
6611 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6616 bnx2_phys_id(struct net_device *dev, u32 data)
6618 struct bnx2 *bp = netdev_priv(dev);
6625 save = REG_RD(bp, BNX2_MISC_CFG);
6626 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6628 for (i = 0; i < (data * 2); i++) {
6630 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6633 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6634 BNX2_EMAC_LED_1000MB_OVERRIDE |
6635 BNX2_EMAC_LED_100MB_OVERRIDE |
6636 BNX2_EMAC_LED_10MB_OVERRIDE |
6637 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6638 BNX2_EMAC_LED_TRAFFIC);
6640 msleep_interruptible(500);
6641 if (signal_pending(current))
6644 REG_WR(bp, BNX2_EMAC_LED, 0);
6645 REG_WR(bp, BNX2_MISC_CFG, save);
6650 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6652 struct bnx2 *bp = netdev_priv(dev);
6654 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6655 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6657 return (ethtool_op_set_tx_csum(dev, data));
6660 static const struct ethtool_ops bnx2_ethtool_ops = {
6661 .get_settings = bnx2_get_settings,
6662 .set_settings = bnx2_set_settings,
6663 .get_drvinfo = bnx2_get_drvinfo,
6664 .get_regs_len = bnx2_get_regs_len,
6665 .get_regs = bnx2_get_regs,
6666 .get_wol = bnx2_get_wol,
6667 .set_wol = bnx2_set_wol,
6668 .nway_reset = bnx2_nway_reset,
6669 .get_link = ethtool_op_get_link,
6670 .get_eeprom_len = bnx2_get_eeprom_len,
6671 .get_eeprom = bnx2_get_eeprom,
6672 .set_eeprom = bnx2_set_eeprom,
6673 .get_coalesce = bnx2_get_coalesce,
6674 .set_coalesce = bnx2_set_coalesce,
6675 .get_ringparam = bnx2_get_ringparam,
6676 .set_ringparam = bnx2_set_ringparam,
6677 .get_pauseparam = bnx2_get_pauseparam,
6678 .set_pauseparam = bnx2_set_pauseparam,
6679 .get_rx_csum = bnx2_get_rx_csum,
6680 .set_rx_csum = bnx2_set_rx_csum,
6681 .set_tx_csum = bnx2_set_tx_csum,
6682 .set_sg = ethtool_op_set_sg,
6683 .set_tso = bnx2_set_tso,
6684 .self_test = bnx2_self_test,
6685 .get_strings = bnx2_get_strings,
6686 .phys_id = bnx2_phys_id,
6687 .get_ethtool_stats = bnx2_get_ethtool_stats,
6688 .get_sset_count = bnx2_get_sset_count,
6691 /* Called with rtnl_lock */
6693 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6695 struct mii_ioctl_data *data = if_mii(ifr);
6696 struct bnx2 *bp = netdev_priv(dev);
6701 data->phy_id = bp->phy_addr;
6707 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6710 if (!netif_running(dev))
6713 spin_lock_bh(&bp->phy_lock);
6714 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6715 spin_unlock_bh(&bp->phy_lock);
6717 data->val_out = mii_regval;
6723 if (!capable(CAP_NET_ADMIN))
6726 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6729 if (!netif_running(dev))
6732 spin_lock_bh(&bp->phy_lock);
6733 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6734 spin_unlock_bh(&bp->phy_lock);
6745 /* Called with rtnl_lock */
6747 bnx2_change_mac_addr(struct net_device *dev, void *p)
6749 struct sockaddr *addr = p;
6750 struct bnx2 *bp = netdev_priv(dev);
6752 if (!is_valid_ether_addr(addr->sa_data))
6755 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6756 if (netif_running(dev))
6757 bnx2_set_mac_addr(bp);
6762 /* Called with rtnl_lock */
6764 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6766 struct bnx2 *bp = netdev_priv(dev);
6768 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6769 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6773 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6776 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6778 poll_bnx2(struct net_device *dev)
6780 struct bnx2 *bp = netdev_priv(dev);
6782 disable_irq(bp->pdev->irq);
6783 bnx2_interrupt(bp->pdev->irq, dev);
6784 enable_irq(bp->pdev->irq);
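/* Determine whether a 5709 port is copper or SerDes from the dual
 * media bond ID and strap settings.
 */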
6788 static void __devinit
6789 bnx2_get_5709_media(struct bnx2 *bp)
6791 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6792 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6795 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6797 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6798 bp->phy_flags |= PHY_SERDES_FLAG;
6802 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6803 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6805 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6807 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6812 bp->phy_flags |= PHY_SERDES_FLAG;
6820 bp->phy_flags |= PHY_SERDES_FLAG;
6826 static void __devinit
6827 bnx2_get_pci_speed(struct bnx2 *bp)
6831 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6832 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6835 bp->flags |= PCIX_FLAG;
6837 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6839 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6841 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6842 bp->bus_speed_mhz = 133;
6845 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6846 bp->bus_speed_mhz = 100;
6849 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6850 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6851 bp->bus_speed_mhz = 66;
6854 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6855 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6856 bp->bus_speed_mhz = 50;
6859 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6860 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6861 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6862 bp->bus_speed_mhz = 33;
6867 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6868 bp->bus_speed_mhz = 66;
6870 bp->bus_speed_mhz = 33;
6873 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6874 bp->flags |= PCI_32BIT_FLAG;
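/* One-time board setup at probe time: enable and map the PCI device,
 * detect chip and bus capabilities, read the firmware version and
 * permanent MAC address from shared memory, and set default ring and
 * coalescing parameters.
 */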
6878 static int __devinit
6879 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6882 unsigned long mem_len;
6885 u64 dma_mask, persist_dma_mask;
6887 SET_NETDEV_DEV(dev, &pdev->dev);
6888 bp = netdev_priv(dev);
6893 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6894 rc = pci_enable_device(pdev);
6896 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6900 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6902 "Cannot find PCI device base address, aborting.\n");
6904 goto err_out_disable;
6907 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6909 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6910 goto err_out_disable;
6913 pci_set_master(pdev);
6915 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6916 if (bp->pm_cap == 0) {
6918 "Cannot find power management capability, aborting.\n");
6920 goto err_out_release;
6926 spin_lock_init(&bp->phy_lock);
6927 spin_lock_init(&bp->indirect_lock);
6928 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6930 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6931 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6932 dev->mem_end = dev->mem_start + mem_len;
6933 dev->irq = pdev->irq;
6935 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6938 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6940 goto err_out_release;
6943 /* Configure byte swap and enable write to the reg_window registers.
6944 * Rely on the CPU to do target byte swapping on big endian systems.
6945 * The chip's target access swapping will not swap all accesses.
6947 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6948 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6949 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6951 bnx2_set_power_state(bp, PCI_D0);
6953 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6955 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6956 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6958 "Cannot find PCIE capability, aborting.\n");
6962 bp->flags |= PCIE_FLAG;
6964 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6965 if (bp->pcix_cap == 0) {
6967 "Cannot find PCIX capability, aborting.\n");
6973 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
6974 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
6975 bp->flags |= MSIX_CAP_FLAG;
6978 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6979 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6980 bp->flags |= MSI_CAP_FLAG;
6983 /* 5708 cannot support DMA addresses > 40-bit. */
6984 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6985 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6987 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6989 /* Configure DMA attributes. */
6990 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6991 dev->features |= NETIF_F_HIGHDMA;
6992 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6995 "pci_set_consistent_dma_mask failed, aborting.\n");
6998 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6999 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7003 if (!(bp->flags & PCIE_FLAG))
7004 bnx2_get_pci_speed(bp);
7006 /* 5706A0 may falsely detect SERR and PERR. */
7007 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7008 reg = REG_RD(bp, PCI_COMMAND);
7009 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7010 REG_WR(bp, PCI_COMMAND, reg);
7012 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7013 !(bp->flags & PCIX_FLAG)) {
7016 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7020 bnx2_init_nvram(bp);
7022 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
7024 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7025 BNX2_SHM_HDR_SIGNATURE_SIG) {
7026 u32 off = PCI_FUNC(pdev->devfn) << 2;
7028 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
7030 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7032 /* Get the permanent MAC address. First we need to make sure the
7033 * firmware is actually running.
7035 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
7037 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7038 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7039 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7044 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
7045 for (i = 0, j = 0; i < 3; i++) {
7048 num = (u8) (reg >> (24 - (i * 8)));
7049 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7050 if (num >= k || !skip0 || k == 1) {
7051 bp->fw_version[j++] = (num / k) + '0';
7056 bp->fw_version[j++] = '.';
7058 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
7059 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7062 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7063 bp->flags |= ASF_ENABLE_FLAG;
7065 for (i = 0; i < 30; i++) {
7066 reg = REG_RD_IND(bp, bp->shmem_base +
7067 BNX2_BC_STATE_CONDITION);
7068 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7073 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
7074 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7075 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7076 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7078 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
7080 bp->fw_version[j++] = ' ';
7081 for (i = 0; i < 3; i++) {
7082 reg = REG_RD_IND(bp, addr + i * 4);
7084 memcpy(&bp->fw_version[j], &reg, 4);
7089 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
7090 bp->mac_addr[0] = (u8) (reg >> 8);
7091 bp->mac_addr[1] = (u8) reg;
7093 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
7094 bp->mac_addr[2] = (u8) (reg >> 24);
7095 bp->mac_addr[3] = (u8) (reg >> 16);
7096 bp->mac_addr[4] = (u8) (reg >> 8);
7097 bp->mac_addr[5] = (u8) reg;
7099 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7101 bp->tx_ring_size = MAX_TX_DESC_CNT;
7102 bnx2_set_rx_ring_size(bp, 255);
7106 bp->tx_quick_cons_trip_int = 20;
7107 bp->tx_quick_cons_trip = 20;
7108 bp->tx_ticks_int = 80;
7111 bp->rx_quick_cons_trip_int = 6;
7112 bp->rx_quick_cons_trip = 6;
7113 bp->rx_ticks_int = 18;
7116 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7118 bp->timer_interval = HZ;
7119 bp->current_interval = HZ;
7123 /* Disable WOL support if we are running on a SERDES chip. */
7124 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7125 bnx2_get_5709_media(bp);
7126 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7127 bp->phy_flags |= PHY_SERDES_FLAG;
7129 bp->phy_port = PORT_TP;
7130 if (bp->phy_flags & PHY_SERDES_FLAG) {
7131 bp->phy_port = PORT_FIBRE;
7132 reg = REG_RD_IND(bp, bp->shmem_base +
7133 BNX2_SHARED_HW_CFG_CONFIG);
7134 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7135 bp->flags |= NO_WOL_FLAG;
7138 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7140 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7141 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
7143 bnx2_init_remote_phy(bp);
7145 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7146 CHIP_NUM(bp) == CHIP_NUM_5708)
7147 bp->phy_flags |= PHY_CRC_FIX_FLAG;
7148 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7149 (CHIP_REV(bp) == CHIP_REV_Ax ||
7150 CHIP_REV(bp) == CHIP_REV_Bx))
7151 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
7153 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7154 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7155 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7156 bp->flags |= NO_WOL_FLAG;
7160 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7161 bp->tx_quick_cons_trip_int =
7162 bp->tx_quick_cons_trip;
7163 bp->tx_ticks_int = bp->tx_ticks;
7164 bp->rx_quick_cons_trip_int =
7165 bp->rx_quick_cons_trip;
7166 bp->rx_ticks_int = bp->rx_ticks;
7167 bp->comp_prod_trip_int = bp->comp_prod_trip;
7168 bp->com_ticks_int = bp->com_ticks;
7169 bp->cmd_ticks_int = bp->cmd_ticks;
7172 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7174 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
7175 * with byte enables disabled on the unused 32-bit word. This is legal
7176 * but causes problems on the AMD 8132 which will eventually stop
7177 * responding after a while.
7179 * AMD believes this incompatibility is unique to the 5706, and
7180 * prefers to locally disable MSI rather than globally disabling it.
7182 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7183 struct pci_dev *amd_8132 = NULL;
7185 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7186 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7189 if (amd_8132->revision >= 0x10 &&
7190 amd_8132->revision <= 0x13) {
7192 pci_dev_put(amd_8132);
7198 bnx2_set_default_link(bp);
7199 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7201 init_timer(&bp->timer);
7202 bp->timer.expires = RUN_AT(bp->timer_interval);
7203 bp->timer.data = (unsigned long) bp;
7204 bp->timer.function = bnx2_timer;
7210 iounmap(bp->regview);
7215 pci_release_regions(pdev);
7218 pci_disable_device(pdev);
7219 pci_set_drvdata(pdev, NULL);
7225 static char * __devinit
7226 bnx2_bus_string(struct bnx2 *bp, char *str)
7230 if (bp->flags & PCIE_FLAG) {
7231 s += sprintf(s, "PCI Express");
7233 s += sprintf(s, "PCI");
7234 if (bp->flags & PCIX_FLAG)
7235 s += sprintf(s, "-X");
7236 if (bp->flags & PCI_32BIT_FLAG)
7237 s += sprintf(s, " 32-bit");
7239 s += sprintf(s, " 64-bit");
7240 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7245 static int __devinit
7246 bnx2_init_napi(struct bnx2 *bp)
7249 struct bnx2_napi *bnapi;
7251 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7252 bnapi = &bp->bnx2_napi[i];
7255 netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
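/* PCI probe entry point: allocate the net_device, initialize the
 * board, wire up the net_device methods and feature flags, and
 * register the interface.
 */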
7258 static int __devinit
7259 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7261 static int version_printed = 0;
7262 struct net_device *dev = NULL;
7266 DECLARE_MAC_BUF(mac);
7268 if (version_printed++ == 0)
7269 printk(KERN_INFO "%s", version);
7271 /* dev zeroed in init_etherdev */
7272 dev = alloc_etherdev(sizeof(*bp));
7277 rc = bnx2_init_board(pdev, dev);
7283 dev->open = bnx2_open;
7284 dev->hard_start_xmit = bnx2_start_xmit;
7285 dev->stop = bnx2_close;
7286 dev->get_stats = bnx2_get_stats;
7287 dev->set_multicast_list = bnx2_set_rx_mode;
7288 dev->do_ioctl = bnx2_ioctl;
7289 dev->set_mac_address = bnx2_change_mac_addr;
7290 dev->change_mtu = bnx2_change_mtu;
7291 dev->tx_timeout = bnx2_tx_timeout;
7292 dev->watchdog_timeo = TX_TIMEOUT;
7294 dev->vlan_rx_register = bnx2_vlan_rx_register;
7296 dev->ethtool_ops = &bnx2_ethtool_ops;
7298 bp = netdev_priv(dev);
7301 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7302 dev->poll_controller = poll_bnx2;
7305 pci_set_drvdata(pdev, dev);
7307 memcpy(dev->dev_addr, bp->mac_addr, 6);
7308 memcpy(dev->perm_addr, bp->mac_addr, 6);
7309 bp->name = board_info[ent->driver_data].name;
7311 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7312 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7313 dev->features |= NETIF_F_IPV6_CSUM;
7316 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7318 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7319 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7320 dev->features |= NETIF_F_TSO6;
7322 if ((rc = register_netdev(dev))) {
7323 dev_err(&pdev->dev, "Cannot register net device\n");
7325 iounmap(bp->regview);
7326 pci_release_regions(pdev);
7327 pci_disable_device(pdev);
7328 pci_set_drvdata(pdev, NULL);
7333 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7334 "IRQ %d, node addr %s\n",
7337 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7338 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7339 bnx2_bus_string(bp, str),
7341 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7346 static void __devexit
7347 bnx2_remove_one(struct pci_dev *pdev)
7349 struct net_device *dev = pci_get_drvdata(pdev);
7350 struct bnx2 *bp = netdev_priv(dev);
7352 flush_scheduled_work();
7354 unregister_netdev(dev);
7357 iounmap(bp->regview);
7360 pci_release_regions(pdev);
7361 pci_disable_device(pdev);
7362 pci_set_drvdata(pdev, NULL);
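/* Power management: quiesce the device and enter the requested
 * low-power state on suspend; restore state and restart the
 * interface on resume.
 */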
7366 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7368 struct net_device *dev = pci_get_drvdata(pdev);
7369 struct bnx2 *bp = netdev_priv(dev);
7372 /* PCI register 4 needs to be saved whether netif_running() or not.
7373 * MSI address and data need to be saved if using MSI and netif_running().
7376 pci_save_state(pdev);
7377 if (!netif_running(dev))
7380 flush_scheduled_work();
7381 bnx2_netif_stop(bp);
7382 netif_device_detach(dev);
7383 del_timer_sync(&bp->timer);
7384 if (bp->flags & NO_WOL_FLAG)
7385 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7387 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7389 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7390 bnx2_reset_chip(bp, reset_code);
7392 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7397 bnx2_resume(struct pci_dev *pdev)
7399 struct net_device *dev = pci_get_drvdata(pdev);
7400 struct bnx2 *bp = netdev_priv(dev);
7402 pci_restore_state(pdev);
7403 if (!netif_running(dev))
7406 bnx2_set_power_state(bp, PCI_D0);
7407 netif_device_attach(dev);
7409 bnx2_netif_start(bp);
7413 static struct pci_driver bnx2_pci_driver = {
7414 .name = DRV_MODULE_NAME,
7415 .id_table = bnx2_pci_tbl,
7416 .probe = bnx2_init_one,
7417 .remove = __devexit_p(bnx2_remove_one),
7418 .suspend = bnx2_suspend,
7419 .resume = bnx2_resume,
7422 static int __init bnx2_init(void)
7424 return pci_register_driver(&bnx2_pci_driver);
7427 static void __exit bnx2_cleanup(void)
7429 pci_unregister_driver(&bnx2_pci_driver);
7432 module_init(bnx2_init);
7433 module_exit(bnx2_cleanup);