1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x8000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.6.6"
60 #define DRV_MODULE_RELDATE "October 2, 2007"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Ateml Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
290 spin_unlock_bh(&bp->indirect_lock);
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
314 for (i = 0; i < 50; i++) {
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
371 for (i = 0; i < 50; i++) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400 bnx2_disable_int(struct bnx2 *bp)
402 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
403 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
404 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
408 bnx2_enable_int(struct bnx2 *bp)
410 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
411 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
412 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
414 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
415 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
417 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
421 bnx2_disable_int_sync(struct bnx2 *bp)
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
429 bnx2_netif_stop(struct bnx2 *bp)
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
433 napi_disable(&bp->napi);
434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
440 bnx2_netif_start(struct bnx2 *bp)
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
445 napi_enable(&bp->napi);
452 bnx2_free_mem(struct bnx2 *bp)
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
464 if (bp->status_blk) {
465 pci_free_consistent(bp->pdev, bp->status_stats_size,
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
468 bp->stats_blk = NULL;
470 if (bp->tx_desc_ring) {
471 pci_free_consistent(bp->pdev,
472 sizeof(struct tx_bd) * TX_DESC_CNT,
473 bp->tx_desc_ring, bp->tx_desc_mapping);
474 bp->tx_desc_ring = NULL;
476 kfree(bp->tx_buf_ring);
477 bp->tx_buf_ring = NULL;
478 for (i = 0; i < bp->rx_max_ring; i++) {
479 if (bp->rx_desc_ring[i])
480 pci_free_consistent(bp->pdev,
481 sizeof(struct rx_bd) * RX_DESC_CNT,
483 bp->rx_desc_mapping[i]);
484 bp->rx_desc_ring[i] = NULL;
486 vfree(bp->rx_buf_ring);
487 bp->rx_buf_ring = NULL;
491 bnx2_alloc_mem(struct bnx2 *bp)
493 int i, status_blk_size;
495 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
497 if (bp->tx_buf_ring == NULL)
500 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
501 sizeof(struct tx_bd) *
503 &bp->tx_desc_mapping);
504 if (bp->tx_desc_ring == NULL)
507 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
509 if (bp->rx_buf_ring == NULL)
512 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
515 for (i = 0; i < bp->rx_max_ring; i++) {
516 bp->rx_desc_ring[i] =
517 pci_alloc_consistent(bp->pdev,
518 sizeof(struct rx_bd) * RX_DESC_CNT,
519 &bp->rx_desc_mapping[i]);
520 if (bp->rx_desc_ring[i] == NULL)
525 /* Combine status and statistics blocks into one allocation. */
526 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
527 bp->status_stats_size = status_blk_size +
528 sizeof(struct statistics_block);
530 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
531 &bp->status_blk_mapping);
532 if (bp->status_blk == NULL)
535 memset(bp->status_blk, 0, bp->status_stats_size);
537 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
540 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
542 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
543 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
544 if (bp->ctx_pages == 0)
546 for (i = 0; i < bp->ctx_pages; i++) {
547 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
549 &bp->ctx_blk_mapping[i]);
550 if (bp->ctx_blk[i] == NULL)
562 bnx2_report_fw_link(struct bnx2 *bp)
564 u32 fw_link_status = 0;
566 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
572 switch (bp->line_speed) {
574 if (bp->duplex == DUPLEX_HALF)
575 fw_link_status = BNX2_LINK_STATUS_10HALF;
577 fw_link_status = BNX2_LINK_STATUS_10FULL;
580 if (bp->duplex == DUPLEX_HALF)
581 fw_link_status = BNX2_LINK_STATUS_100HALF;
583 fw_link_status = BNX2_LINK_STATUS_100FULL;
586 if (bp->duplex == DUPLEX_HALF)
587 fw_link_status = BNX2_LINK_STATUS_1000HALF;
589 fw_link_status = BNX2_LINK_STATUS_1000FULL;
592 if (bp->duplex == DUPLEX_HALF)
593 fw_link_status = BNX2_LINK_STATUS_2500HALF;
595 fw_link_status = BNX2_LINK_STATUS_2500FULL;
599 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
602 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
604 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
605 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
607 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
608 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
609 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
611 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
615 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
617 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
621 bnx2_xceiver_str(struct bnx2 *bp)
623 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
624 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
629 bnx2_report_link(struct bnx2 *bp)
632 netif_carrier_on(bp->dev);
633 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
634 bnx2_xceiver_str(bp));
636 printk("%d Mbps ", bp->line_speed);
638 if (bp->duplex == DUPLEX_FULL)
639 printk("full duplex");
641 printk("half duplex");
644 if (bp->flow_ctrl & FLOW_CTRL_RX) {
645 printk(", receive ");
646 if (bp->flow_ctrl & FLOW_CTRL_TX)
647 printk("& transmit ");
650 printk(", transmit ");
652 printk("flow control ON");
657 netif_carrier_off(bp->dev);
658 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
659 bnx2_xceiver_str(bp));
662 bnx2_report_fw_link(bp);
666 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
668 u32 local_adv, remote_adv;
671 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
672 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
674 if (bp->duplex == DUPLEX_FULL) {
675 bp->flow_ctrl = bp->req_flow_ctrl;
680 if (bp->duplex != DUPLEX_FULL) {
684 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
685 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
688 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
689 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
690 bp->flow_ctrl |= FLOW_CTRL_TX;
691 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
692 bp->flow_ctrl |= FLOW_CTRL_RX;
696 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
697 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
699 if (bp->phy_flags & PHY_SERDES_FLAG) {
700 u32 new_local_adv = 0;
701 u32 new_remote_adv = 0;
703 if (local_adv & ADVERTISE_1000XPAUSE)
704 new_local_adv |= ADVERTISE_PAUSE_CAP;
705 if (local_adv & ADVERTISE_1000XPSE_ASYM)
706 new_local_adv |= ADVERTISE_PAUSE_ASYM;
707 if (remote_adv & ADVERTISE_1000XPAUSE)
708 new_remote_adv |= ADVERTISE_PAUSE_CAP;
709 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
710 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
712 local_adv = new_local_adv;
713 remote_adv = new_remote_adv;
716 /* See Table 28B-3 of 802.3ab-1999 spec. */
717 if (local_adv & ADVERTISE_PAUSE_CAP) {
718 if(local_adv & ADVERTISE_PAUSE_ASYM) {
719 if (remote_adv & ADVERTISE_PAUSE_CAP) {
720 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
722 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
723 bp->flow_ctrl = FLOW_CTRL_RX;
727 if (remote_adv & ADVERTISE_PAUSE_CAP) {
728 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
732 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
733 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
734 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
736 bp->flow_ctrl = FLOW_CTRL_TX;
742 bnx2_5709s_linkup(struct bnx2 *bp)
748 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
749 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
750 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
752 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
753 bp->line_speed = bp->req_line_speed;
754 bp->duplex = bp->req_duplex;
757 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
759 case MII_BNX2_GP_TOP_AN_SPEED_10:
760 bp->line_speed = SPEED_10;
762 case MII_BNX2_GP_TOP_AN_SPEED_100:
763 bp->line_speed = SPEED_100;
765 case MII_BNX2_GP_TOP_AN_SPEED_1G:
766 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
767 bp->line_speed = SPEED_1000;
769 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
770 bp->line_speed = SPEED_2500;
773 if (val & MII_BNX2_GP_TOP_AN_FD)
774 bp->duplex = DUPLEX_FULL;
776 bp->duplex = DUPLEX_HALF;
781 bnx2_5708s_linkup(struct bnx2 *bp)
786 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
787 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
788 case BCM5708S_1000X_STAT1_SPEED_10:
789 bp->line_speed = SPEED_10;
791 case BCM5708S_1000X_STAT1_SPEED_100:
792 bp->line_speed = SPEED_100;
794 case BCM5708S_1000X_STAT1_SPEED_1G:
795 bp->line_speed = SPEED_1000;
797 case BCM5708S_1000X_STAT1_SPEED_2G5:
798 bp->line_speed = SPEED_2500;
801 if (val & BCM5708S_1000X_STAT1_FD)
802 bp->duplex = DUPLEX_FULL;
804 bp->duplex = DUPLEX_HALF;
810 bnx2_5706s_linkup(struct bnx2 *bp)
812 u32 bmcr, local_adv, remote_adv, common;
815 bp->line_speed = SPEED_1000;
817 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
818 if (bmcr & BMCR_FULLDPLX) {
819 bp->duplex = DUPLEX_FULL;
822 bp->duplex = DUPLEX_HALF;
825 if (!(bmcr & BMCR_ANENABLE)) {
829 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
830 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
832 common = local_adv & remote_adv;
833 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
835 if (common & ADVERTISE_1000XFULL) {
836 bp->duplex = DUPLEX_FULL;
839 bp->duplex = DUPLEX_HALF;
847 bnx2_copper_linkup(struct bnx2 *bp)
851 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
852 if (bmcr & BMCR_ANENABLE) {
853 u32 local_adv, remote_adv, common;
855 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
856 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
858 common = local_adv & (remote_adv >> 2);
859 if (common & ADVERTISE_1000FULL) {
860 bp->line_speed = SPEED_1000;
861 bp->duplex = DUPLEX_FULL;
863 else if (common & ADVERTISE_1000HALF) {
864 bp->line_speed = SPEED_1000;
865 bp->duplex = DUPLEX_HALF;
868 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
869 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
871 common = local_adv & remote_adv;
872 if (common & ADVERTISE_100FULL) {
873 bp->line_speed = SPEED_100;
874 bp->duplex = DUPLEX_FULL;
876 else if (common & ADVERTISE_100HALF) {
877 bp->line_speed = SPEED_100;
878 bp->duplex = DUPLEX_HALF;
880 else if (common & ADVERTISE_10FULL) {
881 bp->line_speed = SPEED_10;
882 bp->duplex = DUPLEX_FULL;
884 else if (common & ADVERTISE_10HALF) {
885 bp->line_speed = SPEED_10;
886 bp->duplex = DUPLEX_HALF;
895 if (bmcr & BMCR_SPEED100) {
896 bp->line_speed = SPEED_100;
899 bp->line_speed = SPEED_10;
901 if (bmcr & BMCR_FULLDPLX) {
902 bp->duplex = DUPLEX_FULL;
905 bp->duplex = DUPLEX_HALF;
913 bnx2_set_mac_link(struct bnx2 *bp)
917 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
918 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
919 (bp->duplex == DUPLEX_HALF)) {
920 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
923 /* Configure the EMAC mode register. */
924 val = REG_RD(bp, BNX2_EMAC_MODE);
926 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
927 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
928 BNX2_EMAC_MODE_25G_MODE);
931 switch (bp->line_speed) {
933 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
934 val |= BNX2_EMAC_MODE_PORT_MII_10M;
939 val |= BNX2_EMAC_MODE_PORT_MII;
942 val |= BNX2_EMAC_MODE_25G_MODE;
945 val |= BNX2_EMAC_MODE_PORT_GMII;
950 val |= BNX2_EMAC_MODE_PORT_GMII;
953 /* Set the MAC to operate in the appropriate duplex mode. */
954 if (bp->duplex == DUPLEX_HALF)
955 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
956 REG_WR(bp, BNX2_EMAC_MODE, val);
958 /* Enable/disable rx PAUSE. */
959 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
961 if (bp->flow_ctrl & FLOW_CTRL_RX)
962 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
963 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
965 /* Enable/disable tx PAUSE. */
966 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
967 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
969 if (bp->flow_ctrl & FLOW_CTRL_TX)
970 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
971 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
973 /* Acknowledge the interrupt. */
974 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
980 bnx2_enable_bmsr1(struct bnx2 *bp)
982 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
983 (CHIP_NUM(bp) == CHIP_NUM_5709))
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
985 MII_BNX2_BLK_ADDR_GP_STATUS);
989 bnx2_disable_bmsr1(struct bnx2 *bp)
991 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
992 (CHIP_NUM(bp) == CHIP_NUM_5709))
993 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
994 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
998 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1003 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1006 if (bp->autoneg & AUTONEG_SPEED)
1007 bp->advertising |= ADVERTISED_2500baseX_Full;
1009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (!(up1 & BCM5708S_UP1_2G5)) {
1014 up1 |= BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1027 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1032 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1035 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1036 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1038 bnx2_read_phy(bp, bp->mii_up1, &up1);
1039 if (up1 & BCM5708S_UP1_2G5) {
1040 up1 &= ~BCM5708S_UP1_2G5;
1041 bnx2_write_phy(bp, bp->mii_up1, up1);
1045 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1046 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1047 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1053 bnx2_enable_forced_2g5(struct bnx2 *bp)
1057 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1060 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1063 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1064 MII_BNX2_BLK_ADDR_SERDES_DIG);
1065 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1066 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1067 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1068 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1070 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1071 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1072 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1074 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1075 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1076 bmcr |= BCM5708S_BMCR_FORCE_2500;
1079 if (bp->autoneg & AUTONEG_SPEED) {
1080 bmcr &= ~BMCR_ANENABLE;
1081 if (bp->req_duplex == DUPLEX_FULL)
1082 bmcr |= BMCR_FULLDPLX;
1084 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1088 bnx2_disable_forced_2g5(struct bnx2 *bp)
1092 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1095 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1098 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1099 MII_BNX2_BLK_ADDR_SERDES_DIG);
1100 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1101 val &= ~MII_BNX2_SD_MISC1_FORCE;
1102 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1104 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1105 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1106 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1108 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1113 if (bp->autoneg & AUTONEG_SPEED)
1114 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1115 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1119 bnx2_set_link(struct bnx2 *bp)
1124 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1129 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1132 link_up = bp->link_up;
1134 bnx2_enable_bmsr1(bp);
1135 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1136 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1137 bnx2_disable_bmsr1(bp);
1139 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1140 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1143 val = REG_RD(bp, BNX2_EMAC_STATUS);
1144 if (val & BNX2_EMAC_STATUS_LINK)
1145 bmsr |= BMSR_LSTATUS;
1147 bmsr &= ~BMSR_LSTATUS;
1150 if (bmsr & BMSR_LSTATUS) {
1153 if (bp->phy_flags & PHY_SERDES_FLAG) {
1154 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1155 bnx2_5706s_linkup(bp);
1156 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1157 bnx2_5708s_linkup(bp);
1158 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1159 bnx2_5709s_linkup(bp);
1162 bnx2_copper_linkup(bp);
1164 bnx2_resolve_flow_ctrl(bp);
1167 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1168 (bp->autoneg & AUTONEG_SPEED))
1169 bnx2_disable_forced_2g5(bp);
1171 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1175 if (bp->link_up != link_up) {
1176 bnx2_report_link(bp);
1179 bnx2_set_mac_link(bp);
1185 bnx2_reset_phy(struct bnx2 *bp)
1190 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1192 #define PHY_RESET_MAX_WAIT 100
1193 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1196 bnx2_read_phy(bp, bp->mii_bmcr, ®);
1197 if (!(reg & BMCR_RESET)) {
1202 if (i == PHY_RESET_MAX_WAIT) {
1209 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1213 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1214 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1216 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217 adv = ADVERTISE_1000XPAUSE;
1220 adv = ADVERTISE_PAUSE_CAP;
1223 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 adv = ADVERTISE_1000XPSE_ASYM;
1228 adv = ADVERTISE_PAUSE_ASYM;
1231 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1232 if (bp->phy_flags & PHY_SERDES_FLAG) {
1233 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1236 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1242 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1245 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1247 u32 speed_arg = 0, pause_adv;
1249 pause_adv = bnx2_phy_get_pause_adv(bp);
1251 if (bp->autoneg & AUTONEG_SPEED) {
1252 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1253 if (bp->advertising & ADVERTISED_10baseT_Half)
1254 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1255 if (bp->advertising & ADVERTISED_10baseT_Full)
1256 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1257 if (bp->advertising & ADVERTISED_100baseT_Half)
1258 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1259 if (bp->advertising & ADVERTISED_100baseT_Full)
1260 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1261 if (bp->advertising & ADVERTISED_1000baseT_Full)
1262 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1263 if (bp->advertising & ADVERTISED_2500baseX_Full)
1264 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1266 if (bp->req_line_speed == SPEED_2500)
1267 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1268 else if (bp->req_line_speed == SPEED_1000)
1269 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1270 else if (bp->req_line_speed == SPEED_100) {
1271 if (bp->req_duplex == DUPLEX_FULL)
1272 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1274 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1275 } else if (bp->req_line_speed == SPEED_10) {
1276 if (bp->req_duplex == DUPLEX_FULL)
1277 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1279 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1283 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1284 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1285 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1286 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1288 if (port == PORT_TP)
1289 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1290 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1292 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1294 spin_unlock_bh(&bp->phy_lock);
1295 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1296 spin_lock_bh(&bp->phy_lock);
1302 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1307 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1308 return (bnx2_setup_remote_phy(bp, port));
1310 if (!(bp->autoneg & AUTONEG_SPEED)) {
1312 int force_link_down = 0;
1314 if (bp->req_line_speed == SPEED_2500) {
1315 if (!bnx2_test_and_enable_2g5(bp))
1316 force_link_down = 1;
1317 } else if (bp->req_line_speed == SPEED_1000) {
1318 if (bnx2_test_and_disable_2g5(bp))
1319 force_link_down = 1;
1321 bnx2_read_phy(bp, bp->mii_adv, &adv);
1322 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1324 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1325 new_bmcr = bmcr & ~BMCR_ANENABLE;
1326 new_bmcr |= BMCR_SPEED1000;
1328 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1329 if (bp->req_line_speed == SPEED_2500)
1330 bnx2_enable_forced_2g5(bp);
1331 else if (bp->req_line_speed == SPEED_1000) {
1332 bnx2_disable_forced_2g5(bp);
1333 new_bmcr &= ~0x2000;
1336 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1337 if (bp->req_line_speed == SPEED_2500)
1338 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1340 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1343 if (bp->req_duplex == DUPLEX_FULL) {
1344 adv |= ADVERTISE_1000XFULL;
1345 new_bmcr |= BMCR_FULLDPLX;
1348 adv |= ADVERTISE_1000XHALF;
1349 new_bmcr &= ~BMCR_FULLDPLX;
1351 if ((new_bmcr != bmcr) || (force_link_down)) {
1352 /* Force a link down visible on the other side */
1354 bnx2_write_phy(bp, bp->mii_adv, adv &
1355 ~(ADVERTISE_1000XFULL |
1356 ADVERTISE_1000XHALF));
1357 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1358 BMCR_ANRESTART | BMCR_ANENABLE);
1361 netif_carrier_off(bp->dev);
1362 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1363 bnx2_report_link(bp);
1365 bnx2_write_phy(bp, bp->mii_adv, adv);
1366 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1368 bnx2_resolve_flow_ctrl(bp);
1369 bnx2_set_mac_link(bp);
1374 bnx2_test_and_enable_2g5(bp);
1376 if (bp->advertising & ADVERTISED_1000baseT_Full)
1377 new_adv |= ADVERTISE_1000XFULL;
1379 new_adv |= bnx2_phy_get_pause_adv(bp);
1381 bnx2_read_phy(bp, bp->mii_adv, &adv);
1382 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1384 bp->serdes_an_pending = 0;
1385 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1386 /* Force a link down visible on the other side */
1388 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1389 spin_unlock_bh(&bp->phy_lock);
1391 spin_lock_bh(&bp->phy_lock);
1394 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1395 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1397 /* Speed up link-up time when the link partner
1398 * does not autonegotiate which is very common
1399 * in blade servers. Some blade servers use
1400 * IPMI for kerboard input and it's important
1401 * to minimize link disruptions. Autoneg. involves
1402 * exchanging base pages plus 3 next pages and
1403 * normally completes in about 120 msec.
1405 bp->current_interval = SERDES_AN_TIMEOUT;
1406 bp->serdes_an_pending = 1;
1407 mod_timer(&bp->timer, jiffies + bp->current_interval);
1409 bnx2_resolve_flow_ctrl(bp);
1410 bnx2_set_mac_link(bp);
1416 #define ETHTOOL_ALL_FIBRE_SPEED \
1417 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1418 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1419 (ADVERTISED_1000baseT_Full)
1421 #define ETHTOOL_ALL_COPPER_SPEED \
1422 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1423 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1424 ADVERTISED_1000baseT_Full)
1426 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1427 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1429 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1432 bnx2_set_default_remote_link(struct bnx2 *bp)
1436 if (bp->phy_port == PORT_TP)
1437 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1439 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1441 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1442 bp->req_line_speed = 0;
1443 bp->autoneg |= AUTONEG_SPEED;
1444 bp->advertising = ADVERTISED_Autoneg;
1445 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1446 bp->advertising |= ADVERTISED_10baseT_Half;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1448 bp->advertising |= ADVERTISED_10baseT_Full;
1449 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1450 bp->advertising |= ADVERTISED_100baseT_Half;
1451 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1452 bp->advertising |= ADVERTISED_100baseT_Full;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1454 bp->advertising |= ADVERTISED_1000baseT_Full;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1456 bp->advertising |= ADVERTISED_2500baseX_Full;
1459 bp->advertising = 0;
1460 bp->req_duplex = DUPLEX_FULL;
1461 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1462 bp->req_line_speed = SPEED_10;
1463 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1464 bp->req_duplex = DUPLEX_HALF;
1466 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1467 bp->req_line_speed = SPEED_100;
1468 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1469 bp->req_duplex = DUPLEX_HALF;
1471 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1472 bp->req_line_speed = SPEED_1000;
1473 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1474 bp->req_line_speed = SPEED_2500;
1479 bnx2_set_default_link(struct bnx2 *bp)
1481 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1482 return bnx2_set_default_remote_link(bp);
1484 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1485 bp->req_line_speed = 0;
1486 if (bp->phy_flags & PHY_SERDES_FLAG) {
1489 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1491 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1492 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1493 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1495 bp->req_line_speed = bp->line_speed = SPEED_1000;
1496 bp->req_duplex = DUPLEX_FULL;
1499 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1503 bnx2_send_heart_beat(struct bnx2 *bp)
1508 spin_lock(&bp->indirect_lock);
1509 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1510 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1511 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1512 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1513 spin_unlock(&bp->indirect_lock);
1517 bnx2_remote_phy_event(struct bnx2 *bp)
1520 u8 link_up = bp->link_up;
1523 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1525 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1526 bnx2_send_heart_beat(bp);
1528 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1530 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1536 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1537 bp->duplex = DUPLEX_FULL;
1539 case BNX2_LINK_STATUS_10HALF:
1540 bp->duplex = DUPLEX_HALF;
1541 case BNX2_LINK_STATUS_10FULL:
1542 bp->line_speed = SPEED_10;
1544 case BNX2_LINK_STATUS_100HALF:
1545 bp->duplex = DUPLEX_HALF;
1546 case BNX2_LINK_STATUS_100BASE_T4:
1547 case BNX2_LINK_STATUS_100FULL:
1548 bp->line_speed = SPEED_100;
1550 case BNX2_LINK_STATUS_1000HALF:
1551 bp->duplex = DUPLEX_HALF;
1552 case BNX2_LINK_STATUS_1000FULL:
1553 bp->line_speed = SPEED_1000;
1555 case BNX2_LINK_STATUS_2500HALF:
1556 bp->duplex = DUPLEX_HALF;
1557 case BNX2_LINK_STATUS_2500FULL:
1558 bp->line_speed = SPEED_2500;
1565 spin_lock(&bp->phy_lock);
1567 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1568 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1569 if (bp->duplex == DUPLEX_FULL)
1570 bp->flow_ctrl = bp->req_flow_ctrl;
1572 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1573 bp->flow_ctrl |= FLOW_CTRL_TX;
1574 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1575 bp->flow_ctrl |= FLOW_CTRL_RX;
1578 old_port = bp->phy_port;
1579 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1580 bp->phy_port = PORT_FIBRE;
1582 bp->phy_port = PORT_TP;
1584 if (old_port != bp->phy_port)
1585 bnx2_set_default_link(bp);
1587 spin_unlock(&bp->phy_lock);
1589 if (bp->link_up != link_up)
1590 bnx2_report_link(bp);
1592 bnx2_set_mac_link(bp);
1596 bnx2_set_remote_link(struct bnx2 *bp)
1600 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1602 case BNX2_FW_EVT_CODE_LINK_EVENT:
1603 bnx2_remote_phy_event(bp);
1605 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1607 bnx2_send_heart_beat(bp);
1614 bnx2_setup_copper_phy(struct bnx2 *bp)
1619 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1621 if (bp->autoneg & AUTONEG_SPEED) {
1622 u32 adv_reg, adv1000_reg;
1623 u32 new_adv_reg = 0;
1624 u32 new_adv1000_reg = 0;
1626 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1627 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1628 ADVERTISE_PAUSE_ASYM);
1630 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1631 adv1000_reg &= PHY_ALL_1000_SPEED;
1633 if (bp->advertising & ADVERTISED_10baseT_Half)
1634 new_adv_reg |= ADVERTISE_10HALF;
1635 if (bp->advertising & ADVERTISED_10baseT_Full)
1636 new_adv_reg |= ADVERTISE_10FULL;
1637 if (bp->advertising & ADVERTISED_100baseT_Half)
1638 new_adv_reg |= ADVERTISE_100HALF;
1639 if (bp->advertising & ADVERTISED_100baseT_Full)
1640 new_adv_reg |= ADVERTISE_100FULL;
1641 if (bp->advertising & ADVERTISED_1000baseT_Full)
1642 new_adv1000_reg |= ADVERTISE_1000FULL;
1644 new_adv_reg |= ADVERTISE_CSMA;
1646 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1648 if ((adv1000_reg != new_adv1000_reg) ||
1649 (adv_reg != new_adv_reg) ||
1650 ((bmcr & BMCR_ANENABLE) == 0)) {
1652 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1653 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1654 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1657 else if (bp->link_up) {
1658 /* Flow ctrl may have changed from auto to forced */
1659 /* or vice-versa. */
1661 bnx2_resolve_flow_ctrl(bp);
1662 bnx2_set_mac_link(bp);
1668 if (bp->req_line_speed == SPEED_100) {
1669 new_bmcr |= BMCR_SPEED100;
1671 if (bp->req_duplex == DUPLEX_FULL) {
1672 new_bmcr |= BMCR_FULLDPLX;
1674 if (new_bmcr != bmcr) {
1677 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1678 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1680 if (bmsr & BMSR_LSTATUS) {
1681 /* Force link down */
1682 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1683 spin_unlock_bh(&bp->phy_lock);
1685 spin_lock_bh(&bp->phy_lock);
1687 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1688 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1691 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1693 /* Normally, the new speed is setup after the link has
1694 * gone down and up again. In some cases, link will not go
1695 * down so we need to set up the new speed here.
1697 if (bmsr & BMSR_LSTATUS) {
1698 bp->line_speed = bp->req_line_speed;
1699 bp->duplex = bp->req_duplex;
1700 bnx2_resolve_flow_ctrl(bp);
1701 bnx2_set_mac_link(bp);
1704 bnx2_resolve_flow_ctrl(bp);
1705 bnx2_set_mac_link(bp);
1711 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1713 if (bp->loopback == MAC_LOOPBACK)
1716 if (bp->phy_flags & PHY_SERDES_FLAG) {
1717 return (bnx2_setup_serdes_phy(bp, port));
1720 return (bnx2_setup_copper_phy(bp));
1725 bnx2_init_5709s_phy(struct bnx2 *bp)
1729 bp->mii_bmcr = MII_BMCR + 0x10;
1730 bp->mii_bmsr = MII_BMSR + 0x10;
1731 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1732 bp->mii_adv = MII_ADVERTISE + 0x10;
1733 bp->mii_lpa = MII_LPA + 0x10;
1734 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1736 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1737 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1739 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1742 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1744 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1745 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1746 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1747 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1750 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1751 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1752 val |= BCM5708S_UP1_2G5;
1754 val &= ~BCM5708S_UP1_2G5;
1755 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1757 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1758 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1759 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1760 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1762 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1764 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1765 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1766 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1768 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1774 bnx2_init_5708s_phy(struct bnx2 *bp)
1780 bp->mii_up1 = BCM5708S_UP1;
1782 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1783 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1784 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1786 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1787 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1788 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1790 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1791 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1792 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1794 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1795 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1796 val |= BCM5708S_UP1_2G5;
1797 bnx2_write_phy(bp, BCM5708S_UP1, val);
1800 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1801 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1802 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1803 /* increase tx signal amplitude */
1804 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1805 BCM5708S_BLK_ADDR_TX_MISC);
1806 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1807 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1808 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1809 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1812 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1813 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1818 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1819 BNX2_SHARED_HW_CFG_CONFIG);
1820 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1821 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1822 BCM5708S_BLK_ADDR_TX_MISC);
1823 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1824 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1825 BCM5708S_BLK_ADDR_DIG);
1832 bnx2_init_5706s_phy(struct bnx2 *bp)
1836 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1838 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1839 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1841 if (bp->dev->mtu > 1500) {
1844 /* Set extended packet length bit */
1845 bnx2_write_phy(bp, 0x18, 0x7);
1846 bnx2_read_phy(bp, 0x18, &val);
1847 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1849 bnx2_write_phy(bp, 0x1c, 0x6c00);
1850 bnx2_read_phy(bp, 0x1c, &val);
1851 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1856 bnx2_write_phy(bp, 0x18, 0x7);
1857 bnx2_read_phy(bp, 0x18, &val);
1858 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1860 bnx2_write_phy(bp, 0x1c, 0x6c00);
1861 bnx2_read_phy(bp, 0x1c, &val);
1862 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1869 bnx2_init_copper_phy(struct bnx2 *bp)
1875 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1876 bnx2_write_phy(bp, 0x18, 0x0c00);
1877 bnx2_write_phy(bp, 0x17, 0x000a);
1878 bnx2_write_phy(bp, 0x15, 0x310b);
1879 bnx2_write_phy(bp, 0x17, 0x201f);
1880 bnx2_write_phy(bp, 0x15, 0x9506);
1881 bnx2_write_phy(bp, 0x17, 0x401f);
1882 bnx2_write_phy(bp, 0x15, 0x14e2);
1883 bnx2_write_phy(bp, 0x18, 0x0400);
1886 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1887 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1888 MII_BNX2_DSP_EXPAND_REG | 0x8);
1889 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1891 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1894 if (bp->dev->mtu > 1500) {
1895 /* Set extended packet length bit */
1896 bnx2_write_phy(bp, 0x18, 0x7);
1897 bnx2_read_phy(bp, 0x18, &val);
1898 bnx2_write_phy(bp, 0x18, val | 0x4000);
1900 bnx2_read_phy(bp, 0x10, &val);
1901 bnx2_write_phy(bp, 0x10, val | 0x1);
1904 bnx2_write_phy(bp, 0x18, 0x7);
1905 bnx2_read_phy(bp, 0x18, &val);
1906 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1908 bnx2_read_phy(bp, 0x10, &val);
1909 bnx2_write_phy(bp, 0x10, val & ~0x1);
1912 /* ethernet@wirespeed */
1913 bnx2_write_phy(bp, 0x18, 0x7007);
1914 bnx2_read_phy(bp, 0x18, &val);
1915 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1921 bnx2_init_phy(struct bnx2 *bp)
1926 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1927 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1929 bp->mii_bmcr = MII_BMCR;
1930 bp->mii_bmsr = MII_BMSR;
1931 bp->mii_bmsr1 = MII_BMSR;
1932 bp->mii_adv = MII_ADVERTISE;
1933 bp->mii_lpa = MII_LPA;
1935 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1937 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1940 bnx2_read_phy(bp, MII_PHYSID1, &val);
1941 bp->phy_id = val << 16;
1942 bnx2_read_phy(bp, MII_PHYSID2, &val);
1943 bp->phy_id |= val & 0xffff;
1945 if (bp->phy_flags & PHY_SERDES_FLAG) {
1946 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1947 rc = bnx2_init_5706s_phy(bp);
1948 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1949 rc = bnx2_init_5708s_phy(bp);
1950 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1951 rc = bnx2_init_5709s_phy(bp);
1954 rc = bnx2_init_copper_phy(bp);
1959 rc = bnx2_setup_phy(bp, bp->phy_port);
1965 bnx2_set_mac_loopback(struct bnx2 *bp)
1969 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1970 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1971 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1972 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1977 static int bnx2_test_link(struct bnx2 *);
1980 bnx2_set_phy_loopback(struct bnx2 *bp)
1985 spin_lock_bh(&bp->phy_lock);
1986 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1988 spin_unlock_bh(&bp->phy_lock);
1992 for (i = 0; i < 10; i++) {
1993 if (bnx2_test_link(bp) == 0)
1998 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1999 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2000 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2001 BNX2_EMAC_MODE_25G_MODE);
2003 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2004 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2016 msg_data |= bp->fw_wr_seq;
2018 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2020 /* wait for an acknowledgement. */
2021 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2024 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2026 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2029 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2032 /* If we timed out, inform the firmware that this is the case. */
2033 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2035 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2038 msg_data &= ~BNX2_DRV_MSG_CODE;
2039 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2041 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2046 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2053 bnx2_init_5709_context(struct bnx2 *bp)
2058 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2059 val |= (BCM_PAGE_BITS - 8) << 16;
2060 REG_WR(bp, BNX2_CTX_COMMAND, val);
2061 for (i = 0; i < 10; i++) {
2062 val = REG_RD(bp, BNX2_CTX_COMMAND);
2063 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2067 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2070 for (i = 0; i < bp->ctx_pages; i++) {
2073 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2074 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2075 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2076 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2077 (u64) bp->ctx_blk_mapping[i] >> 32);
2078 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2079 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2080 for (j = 0; j < 10; j++) {
2082 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2083 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2087 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2096 bnx2_init_context(struct bnx2 *bp)
2102 u32 vcid_addr, pcid_addr, offset;
2107 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2110 vcid_addr = GET_PCID_ADDR(vcid);
2112 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2117 pcid_addr = GET_PCID_ADDR(new_vcid);
2120 vcid_addr = GET_CID_ADDR(vcid);
2121 pcid_addr = vcid_addr;
2124 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2125 vcid_addr += (i << PHY_CTX_SHIFT);
2126 pcid_addr += (i << PHY_CTX_SHIFT);
2128 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2129 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2131 /* Zero out the context. */
2132 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2133 CTX_WR(bp, 0x00, offset, 0);
2135 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2136 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2142 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2148 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2149 if (good_mbuf == NULL) {
2150 printk(KERN_ERR PFX "Failed to allocate memory in "
2151 "bnx2_alloc_bad_rbuf\n");
2155 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2156 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2160 /* Allocate a bunch of mbufs and save the good ones in an array. */
2161 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2162 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2163 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2165 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2167 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2169 /* The addresses with Bit 9 set are bad memory blocks. */
2170 if (!(val & (1 << 9))) {
2171 good_mbuf[good_mbuf_cnt] = (u16) val;
2175 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2178 /* Free the good ones back to the mbuf pool thus discarding
2179 * all the bad ones. */
2180 while (good_mbuf_cnt) {
2183 val = good_mbuf[good_mbuf_cnt];
2184 val = (val << 9) | val | 1;
2186 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2193 bnx2_set_mac_addr(struct bnx2 *bp)
2196 u8 *mac_addr = bp->dev->dev_addr;
2198 val = (mac_addr[0] << 8) | mac_addr[1];
2200 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2202 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2203 (mac_addr[4] << 8) | mac_addr[5];
2205 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2209 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2211 struct sk_buff *skb;
2212 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2214 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2215 unsigned long align;
2217 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2222 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2223 skb_reserve(skb, BNX2_RX_ALIGN - align);
2225 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2226 PCI_DMA_FROMDEVICE);
2229 pci_unmap_addr_set(rx_buf, mapping, mapping);
2231 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2232 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2234 bp->rx_prod_bseq += bp->rx_buf_use_size;
2240 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2242 struct status_block *sblk = bp->status_blk;
2243 u32 new_link_state, old_link_state;
2246 new_link_state = sblk->status_attn_bits & event;
2247 old_link_state = sblk->status_attn_bits_ack & event;
2248 if (new_link_state != old_link_state) {
2250 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2252 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2260 bnx2_phy_int(struct bnx2 *bp)
2262 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2263 spin_lock(&bp->phy_lock);
2265 spin_unlock(&bp->phy_lock);
2267 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2268 bnx2_set_remote_link(bp);
2273 bnx2_tx_int(struct bnx2 *bp)
2275 struct status_block *sblk = bp->status_blk;
2276 u16 hw_cons, sw_cons, sw_ring_cons;
2279 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2280 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2283 sw_cons = bp->tx_cons;
2285 while (sw_cons != hw_cons) {
2286 struct sw_bd *tx_buf;
2287 struct sk_buff *skb;
2290 sw_ring_cons = TX_RING_IDX(sw_cons);
2292 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2295 /* partial BD completions possible with TSO packets */
2296 if (skb_is_gso(skb)) {
2297 u16 last_idx, last_ring_idx;
2299 last_idx = sw_cons +
2300 skb_shinfo(skb)->nr_frags + 1;
2301 last_ring_idx = sw_ring_cons +
2302 skb_shinfo(skb)->nr_frags + 1;
2303 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2306 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2311 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2312 skb_headlen(skb), PCI_DMA_TODEVICE);
2315 last = skb_shinfo(skb)->nr_frags;
2317 for (i = 0; i < last; i++) {
2318 sw_cons = NEXT_TX_BD(sw_cons);
2320 pci_unmap_page(bp->pdev,
2322 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2324 skb_shinfo(skb)->frags[i].size,
2328 sw_cons = NEXT_TX_BD(sw_cons);
2330 tx_free_bd += last + 1;
2334 hw_cons = bp->hw_tx_cons =
2335 sblk->status_tx_quick_consumer_index0;
2337 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2342 bp->tx_cons = sw_cons;
2343 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2344 * before checking for netif_queue_stopped(). Without the
2345 * memory barrier, there is a small possibility that bnx2_start_xmit()
2346 * will miss it and cause the queue to be stopped forever.
2350 if (unlikely(netif_queue_stopped(bp->dev)) &&
2351 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2352 netif_tx_lock(bp->dev);
2353 if ((netif_queue_stopped(bp->dev)) &&
2354 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2355 netif_wake_queue(bp->dev);
2356 netif_tx_unlock(bp->dev);
2361 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2364 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2365 struct rx_bd *cons_bd, *prod_bd;
2367 cons_rx_buf = &bp->rx_buf_ring[cons];
2368 prod_rx_buf = &bp->rx_buf_ring[prod];
2370 pci_dma_sync_single_for_device(bp->pdev,
2371 pci_unmap_addr(cons_rx_buf, mapping),
2372 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2374 bp->rx_prod_bseq += bp->rx_buf_use_size;
2376 prod_rx_buf->skb = skb;
2381 pci_unmap_addr_set(prod_rx_buf, mapping,
2382 pci_unmap_addr(cons_rx_buf, mapping));
2384 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2385 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2386 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2387 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2391 bnx2_rx_int(struct bnx2 *bp, int budget)
2393 struct status_block *sblk = bp->status_blk;
2394 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2395 struct l2_fhdr *rx_hdr;
2398 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2399 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2402 sw_cons = bp->rx_cons;
2403 sw_prod = bp->rx_prod;
2405 /* Memory barrier necessary as speculative reads of the rx
2406 * buffer can be ahead of the index in the status block
2409 while (sw_cons != hw_cons) {
2412 struct sw_bd *rx_buf;
2413 struct sk_buff *skb;
2414 dma_addr_t dma_addr;
2416 sw_ring_cons = RX_RING_IDX(sw_cons);
2417 sw_ring_prod = RX_RING_IDX(sw_prod);
2419 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2424 dma_addr = pci_unmap_addr(rx_buf, mapping);
2426 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2427 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2429 rx_hdr = (struct l2_fhdr *) skb->data;
2430 len = rx_hdr->l2_fhdr_pkt_len - 4;
2432 if ((status = rx_hdr->l2_fhdr_status) &
2433 (L2_FHDR_ERRORS_BAD_CRC |
2434 L2_FHDR_ERRORS_PHY_DECODE |
2435 L2_FHDR_ERRORS_ALIGNMENT |
2436 L2_FHDR_ERRORS_TOO_SHORT |
2437 L2_FHDR_ERRORS_GIANT_FRAME)) {
2442 /* Since we don't have a jumbo ring, copy small packets
2445 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2446 struct sk_buff *new_skb;
2448 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2449 if (new_skb == NULL)
2453 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2454 new_skb->data, len + 2);
2455 skb_reserve(new_skb, 2);
2456 skb_put(new_skb, len);
2458 bnx2_reuse_rx_skb(bp, skb,
2459 sw_ring_cons, sw_ring_prod);
2463 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2464 pci_unmap_single(bp->pdev, dma_addr,
2465 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2467 skb_reserve(skb, bp->rx_offset);
2472 bnx2_reuse_rx_skb(bp, skb,
2473 sw_ring_cons, sw_ring_prod);
2477 skb->protocol = eth_type_trans(skb, bp->dev);
2479 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2480 (ntohs(skb->protocol) != 0x8100)) {
2487 skb->ip_summed = CHECKSUM_NONE;
2489 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2490 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2492 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2493 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2494 skb->ip_summed = CHECKSUM_UNNECESSARY;
2498 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2499 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2500 rx_hdr->l2_fhdr_vlan_tag);
2504 netif_receive_skb(skb);
2506 bp->dev->last_rx = jiffies;
2510 sw_cons = NEXT_RX_BD(sw_cons);
2511 sw_prod = NEXT_RX_BD(sw_prod);
2513 if ((rx_pkt == budget))
2516 /* Refresh hw_cons to see if there is new work */
2517 if (sw_cons == hw_cons) {
2518 hw_cons = bp->hw_rx_cons =
2519 sblk->status_rx_quick_consumer_index0;
2520 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2525 bp->rx_cons = sw_cons;
2526 bp->rx_prod = sw_prod;
2528 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2530 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2538 /* MSI ISR - The only difference between this and the INTx ISR
2539 * is that the MSI interrupt is always serviced.
2542 bnx2_msi(int irq, void *dev_instance)
2544 struct net_device *dev = dev_instance;
2545 struct bnx2 *bp = netdev_priv(dev);
2547 prefetch(bp->status_blk);
2548 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2549 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2550 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2552 /* Return here if interrupt is disabled. */
2553 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2556 netif_rx_schedule(dev, &bp->napi);
2562 bnx2_msi_1shot(int irq, void *dev_instance)
2564 struct net_device *dev = dev_instance;
2565 struct bnx2 *bp = netdev_priv(dev);
2567 prefetch(bp->status_blk);
2569 /* Return here if interrupt is disabled. */
2570 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2573 netif_rx_schedule(dev, &bp->napi);
2579 bnx2_interrupt(int irq, void *dev_instance)
2581 struct net_device *dev = dev_instance;
2582 struct bnx2 *bp = netdev_priv(dev);
2583 struct status_block *sblk = bp->status_blk;
2585 /* When using INTx, it is possible for the interrupt to arrive
2586 * at the CPU before the status block posted prior to the
2587 * interrupt. Reading a register will flush the status block.
2588 * When using MSI, the MSI message will always complete after
2589 * the status block write.
2591 if ((sblk->status_idx == bp->last_status_idx) &&
2592 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2593 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2596 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2597 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2598 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2600 /* Read back to deassert IRQ immediately to avoid too many
2601 * spurious interrupts.
2603 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2605 /* Return here if interrupt is shared and is disabled. */
2606 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2609 if (netif_rx_schedule_prep(dev, &bp->napi)) {
2610 bp->last_status_idx = sblk->status_idx;
2611 __netif_rx_schedule(dev, &bp->napi);
2617 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2618 STATUS_ATTN_BITS_TIMER_ABORT)
2621 bnx2_has_work(struct bnx2 *bp)
2623 struct status_block *sblk = bp->status_blk;
2625 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2626 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2629 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2630 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2637 bnx2_poll(struct napi_struct *napi, int budget)
2639 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2640 struct net_device *dev = bp->dev;
2641 struct status_block *sblk = bp->status_blk;
2642 u32 status_attn_bits = sblk->status_attn_bits;
2643 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2646 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2647 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2651 /* This is needed to take care of transient status
2652 * during link changes.
2654 REG_WR(bp, BNX2_HC_COMMAND,
2655 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2656 REG_RD(bp, BNX2_HC_COMMAND);
2659 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2662 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons)
2663 work_done = bnx2_rx_int(bp, budget);
2665 bp->last_status_idx = bp->status_blk->status_idx;
2668 if (!bnx2_has_work(bp)) {
2669 netif_rx_complete(dev, napi);
2670 if (likely(bp->flags & USING_MSI_FLAG)) {
2671 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2672 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2673 bp->last_status_idx);
2676 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2677 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2678 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2679 bp->last_status_idx);
2681 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2682 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2683 bp->last_status_idx);
2689 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2690 * from set_multicast.
2693 bnx2_set_rx_mode(struct net_device *dev)
2695 struct bnx2 *bp = netdev_priv(dev);
2696 u32 rx_mode, sort_mode;
2699 spin_lock_bh(&bp->phy_lock);
2701 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2702 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2703 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2705 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2706 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2708 if (!(bp->flags & ASF_ENABLE_FLAG))
2709 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2711 if (dev->flags & IFF_PROMISC) {
2712 /* Promiscuous mode. */
2713 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2714 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2715 BNX2_RPM_SORT_USER0_PROM_VLAN;
2717 else if (dev->flags & IFF_ALLMULTI) {
2718 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2719 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2722 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2725 /* Accept one or more multicast(s). */
2726 struct dev_mc_list *mclist;
2727 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2732 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2734 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2735 i++, mclist = mclist->next) {
2737 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2739 regidx = (bit & 0xe0) >> 5;
2741 mc_filter[regidx] |= (1 << bit);
2744 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2745 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2749 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2752 if (rx_mode != bp->rx_mode) {
2753 bp->rx_mode = rx_mode;
2754 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2757 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2758 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2759 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2761 spin_unlock_bh(&bp->phy_lock);
2765 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2772 for (i = 0; i < rv2p_code_len; i += 8) {
2773 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2775 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2778 if (rv2p_proc == RV2P_PROC1) {
2779 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2780 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2783 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2784 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2788 /* Reset the processor, un-stall is done later. */
2789 if (rv2p_proc == RV2P_PROC1) {
2790 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2793 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2798 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2805 val = REG_RD_IND(bp, cpu_reg->mode);
2806 val |= cpu_reg->mode_value_halt;
2807 REG_WR_IND(bp, cpu_reg->mode, val);
2808 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2810 /* Load the Text area. */
2811 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2815 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2820 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2821 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2825 /* Load the Data area. */
2826 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2830 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2831 REG_WR_IND(bp, offset, fw->data[j]);
2835 /* Load the SBSS area. */
2836 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2840 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2841 REG_WR_IND(bp, offset, 0);
2845 /* Load the BSS area. */
2846 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2850 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2851 REG_WR_IND(bp, offset, 0);
2855 /* Load the Read-Only area. */
2856 offset = cpu_reg->spad_base +
2857 (fw->rodata_addr - cpu_reg->mips_view_base);
2861 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2862 REG_WR_IND(bp, offset, fw->rodata[j]);
2866 /* Clear the pre-fetch instruction. */
2867 REG_WR_IND(bp, cpu_reg->inst, 0);
2868 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2870 /* Start the CPU. */
2871 val = REG_RD_IND(bp, cpu_reg->mode);
2872 val &= ~cpu_reg->mode_value_halt;
2873 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2874 REG_WR_IND(bp, cpu_reg->mode, val);
2880 bnx2_init_cpus(struct bnx2 *bp)
2882 struct cpu_reg cpu_reg;
2887 /* Initialize the RV2P processor. */
2888 text = vmalloc(FW_BUF_SIZE);
2891 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
2895 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
2897 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
2901 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
2903 /* Initialize the RX Processor. */
2904 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2905 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2906 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2907 cpu_reg.state = BNX2_RXP_CPU_STATE;
2908 cpu_reg.state_value_clear = 0xffffff;
2909 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2910 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2911 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2912 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2913 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2914 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2915 cpu_reg.mips_view_base = 0x8000000;
2917 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2918 fw = &bnx2_rxp_fw_09;
2920 fw = &bnx2_rxp_fw_06;
2923 rc = load_cpu_fw(bp, &cpu_reg, fw);
2927 /* Initialize the TX Processor. */
2928 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2929 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2930 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2931 cpu_reg.state = BNX2_TXP_CPU_STATE;
2932 cpu_reg.state_value_clear = 0xffffff;
2933 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2934 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2935 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2936 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2937 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2938 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2939 cpu_reg.mips_view_base = 0x8000000;
2941 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2942 fw = &bnx2_txp_fw_09;
2944 fw = &bnx2_txp_fw_06;
2947 rc = load_cpu_fw(bp, &cpu_reg, fw);
2951 /* Initialize the TX Patch-up Processor. */
2952 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2953 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2954 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2955 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2956 cpu_reg.state_value_clear = 0xffffff;
2957 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2958 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2959 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2960 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2961 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2962 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2963 cpu_reg.mips_view_base = 0x8000000;
2965 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2966 fw = &bnx2_tpat_fw_09;
2968 fw = &bnx2_tpat_fw_06;
2971 rc = load_cpu_fw(bp, &cpu_reg, fw);
2975 /* Initialize the Completion Processor. */
2976 cpu_reg.mode = BNX2_COM_CPU_MODE;
2977 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2978 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2979 cpu_reg.state = BNX2_COM_CPU_STATE;
2980 cpu_reg.state_value_clear = 0xffffff;
2981 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2982 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2983 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2984 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2985 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2986 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2987 cpu_reg.mips_view_base = 0x8000000;
2989 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2990 fw = &bnx2_com_fw_09;
2992 fw = &bnx2_com_fw_06;
2995 rc = load_cpu_fw(bp, &cpu_reg, fw);
2999 /* Initialize the Command Processor. */
3000 cpu_reg.mode = BNX2_CP_CPU_MODE;
3001 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3002 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3003 cpu_reg.state = BNX2_CP_CPU_STATE;
3004 cpu_reg.state_value_clear = 0xffffff;
3005 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3006 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3007 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3008 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3009 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3010 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3011 cpu_reg.mips_view_base = 0x8000000;
3013 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3014 fw = &bnx2_cp_fw_09;
3017 rc = load_cpu_fw(bp, &cpu_reg, fw);
3027 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3031 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3037 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3038 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3039 PCI_PM_CTRL_PME_STATUS);
3041 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3042 /* delay required during transition out of D3hot */
3045 val = REG_RD(bp, BNX2_EMAC_MODE);
3046 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3047 val &= ~BNX2_EMAC_MODE_MPKT;
3048 REG_WR(bp, BNX2_EMAC_MODE, val);
3050 val = REG_RD(bp, BNX2_RPM_CONFIG);
3051 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3052 REG_WR(bp, BNX2_RPM_CONFIG, val);
3063 autoneg = bp->autoneg;
3064 advertising = bp->advertising;
3066 bp->autoneg = AUTONEG_SPEED;
3067 bp->advertising = ADVERTISED_10baseT_Half |
3068 ADVERTISED_10baseT_Full |
3069 ADVERTISED_100baseT_Half |
3070 ADVERTISED_100baseT_Full |
3073 bnx2_setup_copper_phy(bp);
3075 bp->autoneg = autoneg;
3076 bp->advertising = advertising;
3078 bnx2_set_mac_addr(bp);
3080 val = REG_RD(bp, BNX2_EMAC_MODE);
3082 /* Enable port mode. */
3083 val &= ~BNX2_EMAC_MODE_PORT;
3084 val |= BNX2_EMAC_MODE_PORT_MII |
3085 BNX2_EMAC_MODE_MPKT_RCVD |
3086 BNX2_EMAC_MODE_ACPI_RCVD |
3087 BNX2_EMAC_MODE_MPKT;
3089 REG_WR(bp, BNX2_EMAC_MODE, val);
3091 /* receive all multicast */
3092 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3093 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3096 REG_WR(bp, BNX2_EMAC_RX_MODE,
3097 BNX2_EMAC_RX_MODE_SORT_MODE);
3099 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3100 BNX2_RPM_SORT_USER0_MC_EN;
3101 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3102 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3103 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3104 BNX2_RPM_SORT_USER0_ENA);
3106 /* Need to enable EMAC and RPM for WOL. */
3107 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3108 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3109 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3110 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3112 val = REG_RD(bp, BNX2_RPM_CONFIG);
3113 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3114 REG_WR(bp, BNX2_RPM_CONFIG, val);
3116 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3119 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3122 if (!(bp->flags & NO_WOL_FLAG))
3123 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3125 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3126 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3127 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3136 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3138 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3141 /* No more memory access after this point until
3142 * device is brought back to D0.
3154 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3159 /* Request access to the flash interface. */
3160 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3161 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3162 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3163 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3169 if (j >= NVRAM_TIMEOUT_COUNT)
3176 bnx2_release_nvram_lock(struct bnx2 *bp)
3181 /* Relinquish nvram interface. */
3182 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3184 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3185 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3186 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3192 if (j >= NVRAM_TIMEOUT_COUNT)
3200 bnx2_enable_nvram_write(struct bnx2 *bp)
3204 val = REG_RD(bp, BNX2_MISC_CFG);
3205 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3207 if (bp->flash_info->flags & BNX2_NV_WREN) {
3210 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3211 REG_WR(bp, BNX2_NVM_COMMAND,
3212 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3214 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3217 val = REG_RD(bp, BNX2_NVM_COMMAND);
3218 if (val & BNX2_NVM_COMMAND_DONE)
3222 if (j >= NVRAM_TIMEOUT_COUNT)
3229 bnx2_disable_nvram_write(struct bnx2 *bp)
3233 val = REG_RD(bp, BNX2_MISC_CFG);
3234 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3239 bnx2_enable_nvram_access(struct bnx2 *bp)
3243 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3244 /* Enable both bits, even on read. */
3245 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3246 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3250 bnx2_disable_nvram_access(struct bnx2 *bp)
3254 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3255 /* Disable both bits, even after read. */
3256 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3257 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3258 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3262 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3267 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3268 /* Buffered flash, no erase needed */
3271 /* Build an erase command */
3272 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3273 BNX2_NVM_COMMAND_DOIT;
3275 /* Need to clear DONE bit separately. */
3276 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3278 /* Address of the NVRAM to read from. */
3279 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3281 /* Issue an erase command. */
3282 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3284 /* Wait for completion. */
3285 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3290 val = REG_RD(bp, BNX2_NVM_COMMAND);
3291 if (val & BNX2_NVM_COMMAND_DONE)
3295 if (j >= NVRAM_TIMEOUT_COUNT)
3302 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3307 /* Build the command word. */
3308 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3310 /* Calculate an offset of a buffered flash, not needed for 5709. */
3311 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3312 offset = ((offset / bp->flash_info->page_size) <<
3313 bp->flash_info->page_bits) +
3314 (offset % bp->flash_info->page_size);
3317 /* Need to clear DONE bit separately. */
3318 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3320 /* Address of the NVRAM to read from. */
3321 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3323 /* Issue a read command. */
3324 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3326 /* Wait for completion. */
3327 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3332 val = REG_RD(bp, BNX2_NVM_COMMAND);
3333 if (val & BNX2_NVM_COMMAND_DONE) {
3334 val = REG_RD(bp, BNX2_NVM_READ);
3336 val = be32_to_cpu(val);
3337 memcpy(ret_val, &val, 4);
3341 if (j >= NVRAM_TIMEOUT_COUNT)
3349 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3354 /* Build the command word. */
3355 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3357 /* Calculate an offset of a buffered flash, not needed for 5709. */
3358 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3359 offset = ((offset / bp->flash_info->page_size) <<
3360 bp->flash_info->page_bits) +
3361 (offset % bp->flash_info->page_size);
3364 /* Need to clear DONE bit separately. */
3365 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3367 memcpy(&val32, val, 4);
3368 val32 = cpu_to_be32(val32);
3370 /* Write the data. */
3371 REG_WR(bp, BNX2_NVM_WRITE, val32);
3373 /* Address of the NVRAM to write to. */
3374 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3376 /* Issue the write command. */
3377 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3379 /* Wait for completion. */
3380 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3383 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3386 if (j >= NVRAM_TIMEOUT_COUNT)
3393 bnx2_init_nvram(struct bnx2 *bp)
3396 int j, entry_count, rc = 0;
3397 struct flash_spec *flash;
3399 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3400 bp->flash_info = &flash_5709;
3401 goto get_flash_size;
3404 /* Determine the selected interface. */
3405 val = REG_RD(bp, BNX2_NVM_CFG1);
3407 entry_count = ARRAY_SIZE(flash_table);
3409 if (val & 0x40000000) {
3411 /* Flash interface has been reconfigured */
3412 for (j = 0, flash = &flash_table[0]; j < entry_count;
3414 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3415 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3416 bp->flash_info = flash;
3423 /* Not yet been reconfigured */
3425 if (val & (1 << 23))
3426 mask = FLASH_BACKUP_STRAP_MASK;
3428 mask = FLASH_STRAP_MASK;
3430 for (j = 0, flash = &flash_table[0]; j < entry_count;
3433 if ((val & mask) == (flash->strapping & mask)) {
3434 bp->flash_info = flash;
3436 /* Request access to the flash interface. */
3437 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3440 /* Enable access to flash interface */
3441 bnx2_enable_nvram_access(bp);
3443 /* Reconfigure the flash interface */
3444 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3445 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3446 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3447 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3449 /* Disable access to flash interface */
3450 bnx2_disable_nvram_access(bp);
3451 bnx2_release_nvram_lock(bp);
3456 } /* if (val & 0x40000000) */
3458 if (j == entry_count) {
3459 bp->flash_info = NULL;
3460 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3465 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3466 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3468 bp->flash_size = val;
3470 bp->flash_size = bp->flash_info->total_size;
3476 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3480 u32 cmd_flags, offset32, len32, extra;
3485 /* Request access to the flash interface. */
3486 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3489 /* Enable access to flash interface */
3490 bnx2_enable_nvram_access(bp);
3503 pre_len = 4 - (offset & 3);
3505 if (pre_len >= len32) {
3507 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3508 BNX2_NVM_COMMAND_LAST;
3511 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3514 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3519 memcpy(ret_buf, buf + (offset & 3), pre_len);
3526 extra = 4 - (len32 & 3);
3527 len32 = (len32 + 4) & ~3;
3534 cmd_flags = BNX2_NVM_COMMAND_LAST;
3536 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3537 BNX2_NVM_COMMAND_LAST;
3539 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3541 memcpy(ret_buf, buf, 4 - extra);
3543 else if (len32 > 0) {
3546 /* Read the first word. */
3550 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3552 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3554 /* Advance to the next dword. */
3559 while (len32 > 4 && rc == 0) {
3560 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3562 /* Advance to the next dword. */
3571 cmd_flags = BNX2_NVM_COMMAND_LAST;
3572 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3574 memcpy(ret_buf, buf, 4 - extra);
3577 /* Disable access to flash interface */
3578 bnx2_disable_nvram_access(bp);
3580 bnx2_release_nvram_lock(bp);
3586 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3589 u32 written, offset32, len32;
3590 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3592 int align_start, align_end;
3597 align_start = align_end = 0;
3599 if ((align_start = (offset32 & 3))) {
3601 len32 += align_start;
3604 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3609 align_end = 4 - (len32 & 3);
3611 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3615 if (align_start || align_end) {
3616 align_buf = kmalloc(len32, GFP_KERNEL);
3617 if (align_buf == NULL)
3620 memcpy(align_buf, start, 4);
3623 memcpy(align_buf + len32 - 4, end, 4);
3625 memcpy(align_buf + align_start, data_buf, buf_size);
3629 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3630 flash_buffer = kmalloc(264, GFP_KERNEL);
3631 if (flash_buffer == NULL) {
3633 goto nvram_write_end;
3638 while ((written < len32) && (rc == 0)) {
3639 u32 page_start, page_end, data_start, data_end;
3640 u32 addr, cmd_flags;
3643 /* Find the page_start addr */
3644 page_start = offset32 + written;
3645 page_start -= (page_start % bp->flash_info->page_size);
3646 /* Find the page_end addr */
3647 page_end = page_start + bp->flash_info->page_size;
3648 /* Find the data_start addr */
3649 data_start = (written == 0) ? offset32 : page_start;
3650 /* Find the data_end addr */
3651 data_end = (page_end > offset32 + len32) ?
3652 (offset32 + len32) : page_end;
3654 /* Request access to the flash interface. */
3655 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3656 goto nvram_write_end;
3658 /* Enable access to flash interface */
3659 bnx2_enable_nvram_access(bp);
3661 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3662 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3665 /* Read the whole page into the buffer
3666 * (non-buffer flash only) */
3667 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3668 if (j == (bp->flash_info->page_size - 4)) {
3669 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3671 rc = bnx2_nvram_read_dword(bp,
3677 goto nvram_write_end;
3683 /* Enable writes to flash interface (unlock write-protect) */
3684 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3685 goto nvram_write_end;
3687 /* Loop to write back the buffer data from page_start to
3690 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3691 /* Erase the page */
3692 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3693 goto nvram_write_end;
3695 /* Re-enable the write again for the actual write */
3696 bnx2_enable_nvram_write(bp);
3698 for (addr = page_start; addr < data_start;
3699 addr += 4, i += 4) {
3701 rc = bnx2_nvram_write_dword(bp, addr,
3702 &flash_buffer[i], cmd_flags);
3705 goto nvram_write_end;
3711 /* Loop to write the new data from data_start to data_end */
3712 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3713 if ((addr == page_end - 4) ||
3714 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3715 (addr == data_end - 4))) {
3717 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3719 rc = bnx2_nvram_write_dword(bp, addr, buf,
3723 goto nvram_write_end;
3729 /* Loop to write back the buffer data from data_end
3731 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3732 for (addr = data_end; addr < page_end;
3733 addr += 4, i += 4) {
3735 if (addr == page_end-4) {
3736 cmd_flags = BNX2_NVM_COMMAND_LAST;
3738 rc = bnx2_nvram_write_dword(bp, addr,
3739 &flash_buffer[i], cmd_flags);
3742 goto nvram_write_end;
3748 /* Disable writes to flash interface (lock write-protect) */
3749 bnx2_disable_nvram_write(bp);
3751 /* Disable access to flash interface */
3752 bnx2_disable_nvram_access(bp);
3753 bnx2_release_nvram_lock(bp);
3755 /* Increment written */
3756 written += data_end - data_start;
3760 kfree(flash_buffer);
3766 bnx2_init_remote_phy(struct bnx2 *bp)
3770 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3771 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3774 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3775 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3778 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3779 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3781 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3782 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3783 bp->phy_port = PORT_FIBRE;
3785 bp->phy_port = PORT_TP;
3787 if (netif_running(bp->dev)) {
3790 if (val & BNX2_LINK_STATUS_LINK_UP) {
3792 netif_carrier_on(bp->dev);
3795 netif_carrier_off(bp->dev);
3797 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
3798 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3799 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3806 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3812 /* Wait for the current PCI transaction to complete before
3813 * issuing a reset. */
3814 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3815 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3816 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3817 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3818 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3819 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3822 /* Wait for the firmware to tell us it is ok to issue a reset. */
3823 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3825 /* Deposit a driver reset signature so the firmware knows that
3826 * this is a soft reset. */
3827 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3828 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3830 /* Do a dummy read to force the chip to complete all current transaction
3831 * before we issue a reset. */
3832 val = REG_RD(bp, BNX2_MISC_ID);
3834 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3835 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3836 REG_RD(bp, BNX2_MISC_COMMAND);
3839 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3840 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3842 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3845 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3846 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3847 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3850 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3852 /* Reading back any register after chip reset will hang the
3853 * bus on 5706 A0 and A1. The msleep below provides plenty
3854 * of margin for write posting.
3856 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3857 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3860 /* Reset takes approximate 30 usec */
3861 for (i = 0; i < 10; i++) {
3862 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3863 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3864 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3869 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3870 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3871 printk(KERN_ERR PFX "Chip reset did not complete\n");
3876 /* Make sure byte swapping is properly configured. */
3877 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3878 if (val != 0x01020304) {
3879 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3883 /* Wait for the firmware to finish its initialization. */
3884 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3888 spin_lock_bh(&bp->phy_lock);
3889 old_port = bp->phy_port;
3890 bnx2_init_remote_phy(bp);
3891 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
3892 bnx2_set_default_remote_link(bp);
3893 spin_unlock_bh(&bp->phy_lock);
3895 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3896 /* Adjust the voltage regular to two steps lower. The default
3897 * of this register is 0x0000000e. */
3898 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3900 /* Remove bad rbuf memory from the free pool. */
3901 rc = bnx2_alloc_bad_rbuf(bp);
3908 bnx2_init_chip(struct bnx2 *bp)
3913 /* Make sure the interrupt is not active. */
3914 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3916 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3917 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3919 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3921 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3922 DMA_READ_CHANS << 12 |
3923 DMA_WRITE_CHANS << 16;
3925 val |= (0x2 << 20) | (1 << 11);
3927 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3930 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3931 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3932 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3934 REG_WR(bp, BNX2_DMA_CONFIG, val);
3936 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3937 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3938 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3939 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3942 if (bp->flags & PCIX_FLAG) {
3945 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3947 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3948 val16 & ~PCI_X_CMD_ERO);
3951 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3952 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3953 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3954 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3956 /* Initialize context mapping and zero out the quick contexts. The
3957 * context block must have already been enabled. */
3958 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3959 rc = bnx2_init_5709_context(bp);
3963 bnx2_init_context(bp);
3965 if ((rc = bnx2_init_cpus(bp)) != 0)
3968 bnx2_init_nvram(bp);
3970 bnx2_set_mac_addr(bp);
3972 val = REG_RD(bp, BNX2_MQ_CONFIG);
3973 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3974 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3975 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3976 val |= BNX2_MQ_CONFIG_HALT_DIS;
3978 REG_WR(bp, BNX2_MQ_CONFIG, val);
3980 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3981 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3982 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3984 val = (BCM_PAGE_BITS - 8) << 24;
3985 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3987 /* Configure page size. */
3988 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3989 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3990 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3991 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3993 val = bp->mac_addr[0] +
3994 (bp->mac_addr[1] << 8) +
3995 (bp->mac_addr[2] << 16) +
3997 (bp->mac_addr[4] << 8) +
3998 (bp->mac_addr[5] << 16);
3999 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4001 /* Program the MTU. Also include 4 bytes for CRC32. */
4002 val = bp->dev->mtu + ETH_HLEN + 4;
4003 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4004 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4005 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4007 bp->last_status_idx = 0;
4008 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4010 /* Set up how to generate a link change interrupt. */
4011 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4013 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4014 (u64) bp->status_blk_mapping & 0xffffffff);
4015 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4017 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4018 (u64) bp->stats_blk_mapping & 0xffffffff);
4019 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4020 (u64) bp->stats_blk_mapping >> 32);
4022 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4023 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4025 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4026 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4028 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4029 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4031 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4033 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4035 REG_WR(bp, BNX2_HC_COM_TICKS,
4036 (bp->com_ticks_int << 16) | bp->com_ticks);
4038 REG_WR(bp, BNX2_HC_CMD_TICKS,
4039 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4041 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4042 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4044 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4045 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4047 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4048 val = BNX2_HC_CONFIG_COLLECT_STATS;
4050 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4051 BNX2_HC_CONFIG_COLLECT_STATS;
4054 if (bp->flags & ONE_SHOT_MSI_FLAG)
4055 val |= BNX2_HC_CONFIG_ONE_SHOT;
4057 REG_WR(bp, BNX2_HC_CONFIG, val);
4059 /* Clear internal stats counters. */
4060 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4062 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4064 /* Initialize the receive filter. */
4065 bnx2_set_rx_mode(bp->dev);
4067 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4068 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4069 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4070 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4072 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4075 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4076 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4080 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4086 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4088 u32 val, offset0, offset1, offset2, offset3;
4090 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4091 offset0 = BNX2_L2CTX_TYPE_XI;
4092 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4093 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4094 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4096 offset0 = BNX2_L2CTX_TYPE;
4097 offset1 = BNX2_L2CTX_CMD_TYPE;
4098 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4099 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4101 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4102 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4104 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4105 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4107 val = (u64) bp->tx_desc_mapping >> 32;
4108 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4110 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4111 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4115 bnx2_init_tx_ring(struct bnx2 *bp)
4120 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4122 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4124 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4125 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4130 bp->tx_prod_bseq = 0;
4133 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4134 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4136 bnx2_init_tx_context(bp, cid);
4140 bnx2_init_rx_ring(struct bnx2 *bp)
4144 u16 prod, ring_prod;
4147 /* 8 for CRC and VLAN */
4148 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4150 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4152 ring_prod = prod = bp->rx_prod = 0;
4155 bp->rx_prod_bseq = 0;
4157 for (i = 0; i < bp->rx_max_ring; i++) {
4160 rxbd = &bp->rx_desc_ring[i][0];
4161 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4162 rxbd->rx_bd_len = bp->rx_buf_use_size;
4163 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4165 if (i == (bp->rx_max_ring - 1))
4169 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4170 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4174 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4175 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4177 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4179 val = (u64) bp->rx_desc_mapping[0] >> 32;
4180 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4182 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4183 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4185 for (i = 0; i < bp->rx_ring_size; i++) {
4186 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4189 prod = NEXT_RX_BD(prod);
4190 ring_prod = RX_RING_IDX(prod);
4194 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4196 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4200 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4204 bp->rx_ring_size = size;
4206 while (size > MAX_RX_DESC_CNT) {
4207 size -= MAX_RX_DESC_CNT;
4210 /* round to next power of 2 */
4212 while ((max & num_rings) == 0)
4215 if (num_rings != max)
4218 bp->rx_max_ring = max;
4219 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4223 bnx2_free_tx_skbs(struct bnx2 *bp)
4227 if (bp->tx_buf_ring == NULL)
4230 for (i = 0; i < TX_DESC_CNT; ) {
4231 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4232 struct sk_buff *skb = tx_buf->skb;
4240 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4241 skb_headlen(skb), PCI_DMA_TODEVICE);
4245 last = skb_shinfo(skb)->nr_frags;
4246 for (j = 0; j < last; j++) {
4247 tx_buf = &bp->tx_buf_ring[i + j + 1];
4248 pci_unmap_page(bp->pdev,
4249 pci_unmap_addr(tx_buf, mapping),
4250 skb_shinfo(skb)->frags[j].size,
4260 bnx2_free_rx_skbs(struct bnx2 *bp)
4264 if (bp->rx_buf_ring == NULL)
4267 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4268 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4269 struct sk_buff *skb = rx_buf->skb;
4274 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4275 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4284 bnx2_free_skbs(struct bnx2 *bp)
4286 bnx2_free_tx_skbs(bp);
4287 bnx2_free_rx_skbs(bp);
4291 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4295 rc = bnx2_reset_chip(bp, reset_code);
4300 if ((rc = bnx2_init_chip(bp)) != 0)
4303 bnx2_init_tx_ring(bp);
4304 bnx2_init_rx_ring(bp);
4309 bnx2_init_nic(struct bnx2 *bp)
4313 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4316 spin_lock_bh(&bp->phy_lock);
4319 spin_unlock_bh(&bp->phy_lock);
4324 bnx2_test_registers(struct bnx2 *bp)
4328 static const struct {
4331 #define BNX2_FL_NOT_5709 1
4335 { 0x006c, 0, 0x00000000, 0x0000003f },
4336 { 0x0090, 0, 0xffffffff, 0x00000000 },
4337 { 0x0094, 0, 0x00000000, 0x00000000 },
4339 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4340 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4341 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4342 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4343 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4344 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4345 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4346 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4347 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4349 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4350 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4351 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4352 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4353 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4354 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4356 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4357 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4358 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4360 { 0x1000, 0, 0x00000000, 0x00000001 },
4361 { 0x1004, 0, 0x00000000, 0x000f0001 },
4363 { 0x1408, 0, 0x01c00800, 0x00000000 },
4364 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4365 { 0x14a8, 0, 0x00000000, 0x000001ff },
4366 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4367 { 0x14b0, 0, 0x00000002, 0x00000001 },
4368 { 0x14b8, 0, 0x00000000, 0x00000000 },
4369 { 0x14c0, 0, 0x00000000, 0x00000009 },
4370 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4371 { 0x14cc, 0, 0x00000000, 0x00000001 },
4372 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4374 { 0x1800, 0, 0x00000000, 0x00000001 },
4375 { 0x1804, 0, 0x00000000, 0x00000003 },
4377 { 0x2800, 0, 0x00000000, 0x00000001 },
4378 { 0x2804, 0, 0x00000000, 0x00003f01 },
4379 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4380 { 0x2810, 0, 0xffff0000, 0x00000000 },
4381 { 0x2814, 0, 0xffff0000, 0x00000000 },
4382 { 0x2818, 0, 0xffff0000, 0x00000000 },
4383 { 0x281c, 0, 0xffff0000, 0x00000000 },
4384 { 0x2834, 0, 0xffffffff, 0x00000000 },
4385 { 0x2840, 0, 0x00000000, 0xffffffff },
4386 { 0x2844, 0, 0x00000000, 0xffffffff },
4387 { 0x2848, 0, 0xffffffff, 0x00000000 },
4388 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4390 { 0x2c00, 0, 0x00000000, 0x00000011 },
4391 { 0x2c04, 0, 0x00000000, 0x00030007 },
4393 { 0x3c00, 0, 0x00000000, 0x00000001 },
4394 { 0x3c04, 0, 0x00000000, 0x00070000 },
4395 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4396 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4397 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4398 { 0x3c14, 0, 0x00000000, 0xffffffff },
4399 { 0x3c18, 0, 0x00000000, 0xffffffff },
4400 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4401 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4403 { 0x5004, 0, 0x00000000, 0x0000007f },
4404 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4406 { 0x5c00, 0, 0x00000000, 0x00000001 },
4407 { 0x5c04, 0, 0x00000000, 0x0003000f },
4408 { 0x5c08, 0, 0x00000003, 0x00000000 },
4409 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4410 { 0x5c10, 0, 0x00000000, 0xffffffff },
4411 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4412 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4413 { 0x5c88, 0, 0x00000000, 0x00077373 },
4414 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4416 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4417 { 0x680c, 0, 0xffffffff, 0x00000000 },
4418 { 0x6810, 0, 0xffffffff, 0x00000000 },
4419 { 0x6814, 0, 0xffffffff, 0x00000000 },
4420 { 0x6818, 0, 0xffffffff, 0x00000000 },
4421 { 0x681c, 0, 0xffffffff, 0x00000000 },
4422 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4423 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4424 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4425 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4426 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4427 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4428 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4429 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4430 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4431 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4432 { 0x684c, 0, 0xffffffff, 0x00000000 },
4433 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4434 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4435 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4436 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4437 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4438 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4440 { 0xffff, 0, 0x00000000, 0x00000000 },
4445 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4448 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4449 u32 offset, rw_mask, ro_mask, save_val, val;
4450 u16 flags = reg_tbl[i].flags;
4452 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4455 offset = (u32) reg_tbl[i].offset;
4456 rw_mask = reg_tbl[i].rw_mask;
4457 ro_mask = reg_tbl[i].ro_mask;
4459 save_val = readl(bp->regview + offset);
4461 writel(0, bp->regview + offset);
4463 val = readl(bp->regview + offset);
4464 if ((val & rw_mask) != 0) {
4468 if ((val & ro_mask) != (save_val & ro_mask)) {
4472 writel(0xffffffff, bp->regview + offset);
4474 val = readl(bp->regview + offset);
4475 if ((val & rw_mask) != rw_mask) {
4479 if ((val & ro_mask) != (save_val & ro_mask)) {
4483 writel(save_val, bp->regview + offset);
4487 writel(save_val, bp->regview + offset);
4495 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4497 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4498 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4501 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4504 for (offset = 0; offset < size; offset += 4) {
4506 REG_WR_IND(bp, start + offset, test_pattern[i]);
4508 if (REG_RD_IND(bp, start + offset) !=
4518 bnx2_test_memory(struct bnx2 *bp)
4522 static struct mem_entry {
4525 } mem_tbl_5706[] = {
4526 { 0x60000, 0x4000 },
4527 { 0xa0000, 0x3000 },
4528 { 0xe0000, 0x4000 },
4529 { 0x120000, 0x4000 },
4530 { 0x1a0000, 0x4000 },
4531 { 0x160000, 0x4000 },
4535 { 0x60000, 0x4000 },
4536 { 0xa0000, 0x3000 },
4537 { 0xe0000, 0x4000 },
4538 { 0x120000, 0x4000 },
4539 { 0x1a0000, 0x4000 },
4542 struct mem_entry *mem_tbl;
4544 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4545 mem_tbl = mem_tbl_5709;
4547 mem_tbl = mem_tbl_5706;
4549 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4550 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4551 mem_tbl[i].len)) != 0) {
4559 #define BNX2_MAC_LOOPBACK 0
4560 #define BNX2_PHY_LOOPBACK 1
4563 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4565 unsigned int pkt_size, num_pkts, i;
4566 struct sk_buff *skb, *rx_skb;
4567 unsigned char *packet;
4568 u16 rx_start_idx, rx_idx;
4571 struct sw_bd *rx_buf;
4572 struct l2_fhdr *rx_hdr;
4575 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4576 bp->loopback = MAC_LOOPBACK;
4577 bnx2_set_mac_loopback(bp);
4579 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4580 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4583 bp->loopback = PHY_LOOPBACK;
4584 bnx2_set_phy_loopback(bp);
4590 skb = netdev_alloc_skb(bp->dev, pkt_size);
4593 packet = skb_put(skb, pkt_size);
4594 memcpy(packet, bp->dev->dev_addr, 6);
4595 memset(packet + 6, 0x0, 8);
4596 for (i = 14; i < pkt_size; i++)
4597 packet[i] = (unsigned char) (i & 0xff);
4599 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4602 REG_WR(bp, BNX2_HC_COMMAND,
4603 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4605 REG_RD(bp, BNX2_HC_COMMAND);
4608 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4612 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4614 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4615 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4616 txbd->tx_bd_mss_nbytes = pkt_size;
4617 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4620 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4621 bp->tx_prod_bseq += pkt_size;
4623 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4624 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4628 REG_WR(bp, BNX2_HC_COMMAND,
4629 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4631 REG_RD(bp, BNX2_HC_COMMAND);
4635 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4638 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4639 goto loopback_test_done;
4642 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4643 if (rx_idx != rx_start_idx + num_pkts) {
4644 goto loopback_test_done;
4647 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4648 rx_skb = rx_buf->skb;
4650 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4651 skb_reserve(rx_skb, bp->rx_offset);
4653 pci_dma_sync_single_for_cpu(bp->pdev,
4654 pci_unmap_addr(rx_buf, mapping),
4655 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4657 if (rx_hdr->l2_fhdr_status &
4658 (L2_FHDR_ERRORS_BAD_CRC |
4659 L2_FHDR_ERRORS_PHY_DECODE |
4660 L2_FHDR_ERRORS_ALIGNMENT |
4661 L2_FHDR_ERRORS_TOO_SHORT |
4662 L2_FHDR_ERRORS_GIANT_FRAME)) {
4664 goto loopback_test_done;
4667 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4668 goto loopback_test_done;
4671 for (i = 14; i < pkt_size; i++) {
4672 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4673 goto loopback_test_done;
4684 #define BNX2_MAC_LOOPBACK_FAILED 1
4685 #define BNX2_PHY_LOOPBACK_FAILED 2
4686 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4687 BNX2_PHY_LOOPBACK_FAILED)
4690 bnx2_test_loopback(struct bnx2 *bp)
4694 if (!netif_running(bp->dev))
4695 return BNX2_LOOPBACK_FAILED;
4697 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4698 spin_lock_bh(&bp->phy_lock);
4700 spin_unlock_bh(&bp->phy_lock);
4701 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4702 rc |= BNX2_MAC_LOOPBACK_FAILED;
4703 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4704 rc |= BNX2_PHY_LOOPBACK_FAILED;
4708 #define NVRAM_SIZE 0x200
4709 #define CRC32_RESIDUAL 0xdebb20e3
4712 bnx2_test_nvram(struct bnx2 *bp)
4714 u32 buf[NVRAM_SIZE / 4];
4715 u8 *data = (u8 *) buf;
4719 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4720 goto test_nvram_done;
4722 magic = be32_to_cpu(buf[0]);
4723 if (magic != 0x669955aa) {
4725 goto test_nvram_done;
4728 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4729 goto test_nvram_done;
4731 csum = ether_crc_le(0x100, data);
4732 if (csum != CRC32_RESIDUAL) {
4734 goto test_nvram_done;
4737 csum = ether_crc_le(0x100, data + 0x100);
4738 if (csum != CRC32_RESIDUAL) {
4747 bnx2_test_link(struct bnx2 *bp)
4751 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4756 spin_lock_bh(&bp->phy_lock);
4757 bnx2_enable_bmsr1(bp);
4758 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4759 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4760 bnx2_disable_bmsr1(bp);
4761 spin_unlock_bh(&bp->phy_lock);
4763 if (bmsr & BMSR_LSTATUS) {
4770 bnx2_test_intr(struct bnx2 *bp)
4775 if (!netif_running(bp->dev))
4778 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4780 /* This register is not touched during run-time. */
4781 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4782 REG_RD(bp, BNX2_HC_COMMAND);
4784 for (i = 0; i < 10; i++) {
4785 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4791 msleep_interruptible(10);
4800 bnx2_5706_serdes_timer(struct bnx2 *bp)
4802 spin_lock(&bp->phy_lock);
4803 if (bp->serdes_an_pending)
4804 bp->serdes_an_pending--;
4805 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4808 bp->current_interval = bp->timer_interval;
4810 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4812 if (bmcr & BMCR_ANENABLE) {
4815 bnx2_write_phy(bp, 0x1c, 0x7c00);
4816 bnx2_read_phy(bp, 0x1c, &phy1);
4818 bnx2_write_phy(bp, 0x17, 0x0f01);
4819 bnx2_read_phy(bp, 0x15, &phy2);
4820 bnx2_write_phy(bp, 0x17, 0x0f01);
4821 bnx2_read_phy(bp, 0x15, &phy2);
4823 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4824 !(phy2 & 0x20)) { /* no CONFIG */
4826 bmcr &= ~BMCR_ANENABLE;
4827 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4828 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4829 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4833 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4834 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4837 bnx2_write_phy(bp, 0x17, 0x0f01);
4838 bnx2_read_phy(bp, 0x15, &phy2);
4842 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4843 bmcr |= BMCR_ANENABLE;
4844 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4846 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4849 bp->current_interval = bp->timer_interval;
4851 spin_unlock(&bp->phy_lock);
4855 bnx2_5708_serdes_timer(struct bnx2 *bp)
4857 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4860 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4861 bp->serdes_an_pending = 0;
4865 spin_lock(&bp->phy_lock);
4866 if (bp->serdes_an_pending)
4867 bp->serdes_an_pending--;
4868 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4871 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4872 if (bmcr & BMCR_ANENABLE) {
4873 bnx2_enable_forced_2g5(bp);
4874 bp->current_interval = SERDES_FORCED_TIMEOUT;
4876 bnx2_disable_forced_2g5(bp);
4877 bp->serdes_an_pending = 2;
4878 bp->current_interval = bp->timer_interval;
4882 bp->current_interval = bp->timer_interval;
4884 spin_unlock(&bp->phy_lock);
4888 bnx2_timer(unsigned long data)
4890 struct bnx2 *bp = (struct bnx2 *) data;
4892 if (!netif_running(bp->dev))
4895 if (atomic_read(&bp->intr_sem) != 0)
4896 goto bnx2_restart_timer;
4898 bnx2_send_heart_beat(bp);
4900 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4902 /* workaround occasional corrupted counters */
4903 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4904 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4905 BNX2_HC_COMMAND_STATS_NOW);
4907 if (bp->phy_flags & PHY_SERDES_FLAG) {
4908 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4909 bnx2_5706_serdes_timer(bp);
4911 bnx2_5708_serdes_timer(bp);
4915 mod_timer(&bp->timer, jiffies + bp->current_interval);
4919 bnx2_request_irq(struct bnx2 *bp)
4921 struct net_device *dev = bp->dev;
4924 if (bp->flags & USING_MSI_FLAG) {
4925 irq_handler_t fn = bnx2_msi;
4927 if (bp->flags & ONE_SHOT_MSI_FLAG)
4928 fn = bnx2_msi_1shot;
4930 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4932 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4933 IRQF_SHARED, dev->name, dev);
4938 bnx2_free_irq(struct bnx2 *bp)
4940 struct net_device *dev = bp->dev;
4942 if (bp->flags & USING_MSI_FLAG) {
4943 free_irq(bp->pdev->irq, dev);
4944 pci_disable_msi(bp->pdev);
4945 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4947 free_irq(bp->pdev->irq, dev);
4950 /* Called with rtnl_lock */
4952 bnx2_open(struct net_device *dev)
4954 struct bnx2 *bp = netdev_priv(dev);
4957 netif_carrier_off(dev);
4959 bnx2_set_power_state(bp, PCI_D0);
4960 bnx2_disable_int(bp);
4962 rc = bnx2_alloc_mem(bp);
4966 napi_enable(&bp->napi);
4968 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
4969 if (pci_enable_msi(bp->pdev) == 0) {
4970 bp->flags |= USING_MSI_FLAG;
4971 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4972 bp->flags |= ONE_SHOT_MSI_FLAG;
4975 rc = bnx2_request_irq(bp);
4978 napi_disable(&bp->napi);
4983 rc = bnx2_init_nic(bp);
4986 napi_disable(&bp->napi);
4993 mod_timer(&bp->timer, jiffies + bp->current_interval);
4995 atomic_set(&bp->intr_sem, 0);
4997 bnx2_enable_int(bp);
4999 if (bp->flags & USING_MSI_FLAG) {
5000 /* Test MSI to make sure it is working
5001 * If MSI test fails, go back to INTx mode
5003 if (bnx2_test_intr(bp) != 0) {
5004 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5005 " using MSI, switching to INTx mode. Please"
5006 " report this failure to the PCI maintainer"
5007 " and include system chipset information.\n",
5010 bnx2_disable_int(bp);
5013 rc = bnx2_init_nic(bp);
5016 rc = bnx2_request_irq(bp);
5019 napi_disable(&bp->napi);
5022 del_timer_sync(&bp->timer);
5025 bnx2_enable_int(bp);
5028 if (bp->flags & USING_MSI_FLAG) {
5029 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5032 netif_start_queue(dev);
5038 bnx2_reset_task(struct work_struct *work)
5040 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5042 if (!netif_running(bp->dev))
5045 bp->in_reset_task = 1;
5046 bnx2_netif_stop(bp);
5050 atomic_set(&bp->intr_sem, 1);
5051 bnx2_netif_start(bp);
5052 bp->in_reset_task = 0;
5056 bnx2_tx_timeout(struct net_device *dev)
5058 struct bnx2 *bp = netdev_priv(dev);
5060 /* This allows the netif to be shutdown gracefully before resetting */
5061 schedule_work(&bp->reset_task);
5065 /* Called with rtnl_lock */
5067 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5069 struct bnx2 *bp = netdev_priv(dev);
5071 bnx2_netif_stop(bp);
5074 bnx2_set_rx_mode(dev);
5076 bnx2_netif_start(bp);
5080 /* Called with netif_tx_lock.
5081 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5082 * netif_wake_queue().
5085 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5087 struct bnx2 *bp = netdev_priv(dev);
5090 struct sw_bd *tx_buf;
5091 u32 len, vlan_tag_flags, last_frag, mss;
5092 u16 prod, ring_prod;
5095 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5096 netif_stop_queue(dev);
5097 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5100 return NETDEV_TX_BUSY;
5102 len = skb_headlen(skb);
5104 ring_prod = TX_RING_IDX(prod);
5107 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5108 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5111 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5113 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5115 if ((mss = skb_shinfo(skb)->gso_size)) {
5116 u32 tcp_opt_len, ip_tcp_len;
5119 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5121 tcp_opt_len = tcp_optlen(skb);
5123 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5124 u32 tcp_off = skb_transport_offset(skb) -
5125 sizeof(struct ipv6hdr) - ETH_HLEN;
5127 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5128 TX_BD_FLAGS_SW_FLAGS;
5129 if (likely(tcp_off == 0))
5130 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5133 vlan_tag_flags |= ((tcp_off & 0x3) <<
5134 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5135 ((tcp_off & 0x10) <<
5136 TX_BD_FLAGS_TCP6_OFF4_SHL);
5137 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5140 if (skb_header_cloned(skb) &&
5141 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5143 return NETDEV_TX_OK;
5146 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5150 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5151 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5155 if (tcp_opt_len || (iph->ihl > 5)) {
5156 vlan_tag_flags |= ((iph->ihl - 5) +
5157 (tcp_opt_len >> 2)) << 8;
5163 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5165 tx_buf = &bp->tx_buf_ring[ring_prod];
5167 pci_unmap_addr_set(tx_buf, mapping, mapping);
5169 txbd = &bp->tx_desc_ring[ring_prod];
5171 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5172 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5173 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5174 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5176 last_frag = skb_shinfo(skb)->nr_frags;
5178 for (i = 0; i < last_frag; i++) {
5179 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5181 prod = NEXT_TX_BD(prod);
5182 ring_prod = TX_RING_IDX(prod);
5183 txbd = &bp->tx_desc_ring[ring_prod];
5186 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5187 len, PCI_DMA_TODEVICE);
5188 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5191 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5192 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5193 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5194 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5197 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5199 prod = NEXT_TX_BD(prod);
5200 bp->tx_prod_bseq += skb->len;
5202 REG_WR16(bp, bp->tx_bidx_addr, prod);
5203 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5208 dev->trans_start = jiffies;
5210 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5211 netif_stop_queue(dev);
5212 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5213 netif_wake_queue(dev);
5216 return NETDEV_TX_OK;
5219 /* Called with rtnl_lock */
5221 bnx2_close(struct net_device *dev)
5223 struct bnx2 *bp = netdev_priv(dev);
5226 /* Calling flush_scheduled_work() may deadlock because
5227 * linkwatch_event() may be on the workqueue and it will try to get
5228 * the rtnl_lock which we are holding.
5230 while (bp->in_reset_task)
5233 bnx2_disable_int_sync(bp);
5234 napi_disable(&bp->napi);
5235 del_timer_sync(&bp->timer);
5236 if (bp->flags & NO_WOL_FLAG)
5237 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5239 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5241 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5242 bnx2_reset_chip(bp, reset_code);
5247 netif_carrier_off(bp->dev);
5248 bnx2_set_power_state(bp, PCI_D3hot);
5252 #define GET_NET_STATS64(ctr) \
5253 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5254 (unsigned long) (ctr##_lo)
5256 #define GET_NET_STATS32(ctr) \
5259 #if (BITS_PER_LONG == 64)
5260 #define GET_NET_STATS GET_NET_STATS64
5262 #define GET_NET_STATS GET_NET_STATS32
5265 static struct net_device_stats *
5266 bnx2_get_stats(struct net_device *dev)
5268 struct bnx2 *bp = netdev_priv(dev);
5269 struct statistics_block *stats_blk = bp->stats_blk;
5270 struct net_device_stats *net_stats = &bp->net_stats;
5272 if (bp->stats_blk == NULL) {
5275 net_stats->rx_packets =
5276 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5277 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5278 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5280 net_stats->tx_packets =
5281 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5282 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5283 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5285 net_stats->rx_bytes =
5286 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5288 net_stats->tx_bytes =
5289 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5291 net_stats->multicast =
5292 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5294 net_stats->collisions =
5295 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5297 net_stats->rx_length_errors =
5298 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5299 stats_blk->stat_EtherStatsOverrsizePkts);
5301 net_stats->rx_over_errors =
5302 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5304 net_stats->rx_frame_errors =
5305 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5307 net_stats->rx_crc_errors =
5308 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5310 net_stats->rx_errors = net_stats->rx_length_errors +
5311 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5312 net_stats->rx_crc_errors;
5314 net_stats->tx_aborted_errors =
5315 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5316 stats_blk->stat_Dot3StatsLateCollisions);
5318 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5319 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5320 net_stats->tx_carrier_errors = 0;
5322 net_stats->tx_carrier_errors =
5324 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5327 net_stats->tx_errors =
5329 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5331 net_stats->tx_aborted_errors +
5332 net_stats->tx_carrier_errors;
5334 net_stats->rx_missed_errors =
5335 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5336 stats_blk->stat_FwRxDrop);
5341 /* All ethtool functions called with rtnl_lock */
5344 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5346 struct bnx2 *bp = netdev_priv(dev);
5347 int support_serdes = 0, support_copper = 0;
5349 cmd->supported = SUPPORTED_Autoneg;
5350 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5353 } else if (bp->phy_port == PORT_FIBRE)
5358 if (support_serdes) {
5359 cmd->supported |= SUPPORTED_1000baseT_Full |
5361 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5362 cmd->supported |= SUPPORTED_2500baseX_Full;
5365 if (support_copper) {
5366 cmd->supported |= SUPPORTED_10baseT_Half |
5367 SUPPORTED_10baseT_Full |
5368 SUPPORTED_100baseT_Half |
5369 SUPPORTED_100baseT_Full |
5370 SUPPORTED_1000baseT_Full |
5375 spin_lock_bh(&bp->phy_lock);
5376 cmd->port = bp->phy_port;
5377 cmd->advertising = bp->advertising;
5379 if (bp->autoneg & AUTONEG_SPEED) {
5380 cmd->autoneg = AUTONEG_ENABLE;
5383 cmd->autoneg = AUTONEG_DISABLE;
5386 if (netif_carrier_ok(dev)) {
5387 cmd->speed = bp->line_speed;
5388 cmd->duplex = bp->duplex;
5394 spin_unlock_bh(&bp->phy_lock);
5396 cmd->transceiver = XCVR_INTERNAL;
5397 cmd->phy_address = bp->phy_addr;
5403 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5405 struct bnx2 *bp = netdev_priv(dev);
5406 u8 autoneg = bp->autoneg;
5407 u8 req_duplex = bp->req_duplex;
5408 u16 req_line_speed = bp->req_line_speed;
5409 u32 advertising = bp->advertising;
5412 spin_lock_bh(&bp->phy_lock);
5414 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5415 goto err_out_unlock;
5417 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5418 goto err_out_unlock;
5420 if (cmd->autoneg == AUTONEG_ENABLE) {
5421 autoneg |= AUTONEG_SPEED;
5423 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5425 /* allow advertising 1 speed */
5426 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5427 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5428 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5429 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5431 if (cmd->port == PORT_FIBRE)
5432 goto err_out_unlock;
5434 advertising = cmd->advertising;
5436 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5437 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5438 (cmd->port == PORT_TP))
5439 goto err_out_unlock;
5440 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5441 advertising = cmd->advertising;
5442 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5443 goto err_out_unlock;
5445 if (cmd->port == PORT_FIBRE)
5446 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5448 advertising = ETHTOOL_ALL_COPPER_SPEED;
5450 advertising |= ADVERTISED_Autoneg;
5453 if (cmd->port == PORT_FIBRE) {
5454 if ((cmd->speed != SPEED_1000 &&
5455 cmd->speed != SPEED_2500) ||
5456 (cmd->duplex != DUPLEX_FULL))
5457 goto err_out_unlock;
5459 if (cmd->speed == SPEED_2500 &&
5460 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5461 goto err_out_unlock;
5463 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5464 goto err_out_unlock;
5466 autoneg &= ~AUTONEG_SPEED;
5467 req_line_speed = cmd->speed;
5468 req_duplex = cmd->duplex;
5472 bp->autoneg = autoneg;
5473 bp->advertising = advertising;
5474 bp->req_line_speed = req_line_speed;
5475 bp->req_duplex = req_duplex;
5477 err = bnx2_setup_phy(bp, cmd->port);
5480 spin_unlock_bh(&bp->phy_lock);
5486 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5488 struct bnx2 *bp = netdev_priv(dev);
5490 strcpy(info->driver, DRV_MODULE_NAME);
5491 strcpy(info->version, DRV_MODULE_VERSION);
5492 strcpy(info->bus_info, pci_name(bp->pdev));
5493 strcpy(info->fw_version, bp->fw_version);
5496 #define BNX2_REGDUMP_LEN (32 * 1024)
5499 bnx2_get_regs_len(struct net_device *dev)
5501 return BNX2_REGDUMP_LEN;
5505 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5507 u32 *p = _p, i, offset;
5509 struct bnx2 *bp = netdev_priv(dev);
5510 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5511 0x0800, 0x0880, 0x0c00, 0x0c10,
5512 0x0c30, 0x0d08, 0x1000, 0x101c,
5513 0x1040, 0x1048, 0x1080, 0x10a4,
5514 0x1400, 0x1490, 0x1498, 0x14f0,
5515 0x1500, 0x155c, 0x1580, 0x15dc,
5516 0x1600, 0x1658, 0x1680, 0x16d8,
5517 0x1800, 0x1820, 0x1840, 0x1854,
5518 0x1880, 0x1894, 0x1900, 0x1984,
5519 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5520 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5521 0x2000, 0x2030, 0x23c0, 0x2400,
5522 0x2800, 0x2820, 0x2830, 0x2850,
5523 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5524 0x3c00, 0x3c94, 0x4000, 0x4010,
5525 0x4080, 0x4090, 0x43c0, 0x4458,
5526 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5527 0x4fc0, 0x5010, 0x53c0, 0x5444,
5528 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5529 0x5fc0, 0x6000, 0x6400, 0x6428,
5530 0x6800, 0x6848, 0x684c, 0x6860,
5531 0x6888, 0x6910, 0x8000 };
5535 memset(p, 0, BNX2_REGDUMP_LEN);
5537 if (!netif_running(bp->dev))
5541 offset = reg_boundaries[0];
5543 while (offset < BNX2_REGDUMP_LEN) {
5544 *p++ = REG_RD(bp, offset);
5546 if (offset == reg_boundaries[i + 1]) {
5547 offset = reg_boundaries[i + 2];
5548 p = (u32 *) (orig_p + offset);
5555 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5557 struct bnx2 *bp = netdev_priv(dev);
5559 if (bp->flags & NO_WOL_FLAG) {
5564 wol->supported = WAKE_MAGIC;
5566 wol->wolopts = WAKE_MAGIC;
5570 memset(&wol->sopass, 0, sizeof(wol->sopass));
5574 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5576 struct bnx2 *bp = netdev_priv(dev);
5578 if (wol->wolopts & ~WAKE_MAGIC)
5581 if (wol->wolopts & WAKE_MAGIC) {
5582 if (bp->flags & NO_WOL_FLAG)
5594 bnx2_nway_reset(struct net_device *dev)
5596 struct bnx2 *bp = netdev_priv(dev);
5599 if (!(bp->autoneg & AUTONEG_SPEED)) {
5603 spin_lock_bh(&bp->phy_lock);
5605 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5608 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5609 spin_unlock_bh(&bp->phy_lock);
5613 /* Force a link down visible on the other side */
5614 if (bp->phy_flags & PHY_SERDES_FLAG) {
5615 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5616 spin_unlock_bh(&bp->phy_lock);
5620 spin_lock_bh(&bp->phy_lock);
5622 bp->current_interval = SERDES_AN_TIMEOUT;
5623 bp->serdes_an_pending = 1;
5624 mod_timer(&bp->timer, jiffies + bp->current_interval);
5627 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5628 bmcr &= ~BMCR_LOOPBACK;
5629 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5631 spin_unlock_bh(&bp->phy_lock);
5637 bnx2_get_eeprom_len(struct net_device *dev)
5639 struct bnx2 *bp = netdev_priv(dev);
5641 if (bp->flash_info == NULL)
5644 return (int) bp->flash_size;
5648 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5651 struct bnx2 *bp = netdev_priv(dev);
5654 /* parameters already validated in ethtool_get_eeprom */
5656 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5662 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5665 struct bnx2 *bp = netdev_priv(dev);
5668 /* parameters already validated in ethtool_set_eeprom */
5670 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5676 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5678 struct bnx2 *bp = netdev_priv(dev);
5680 memset(coal, 0, sizeof(struct ethtool_coalesce));
5682 coal->rx_coalesce_usecs = bp->rx_ticks;
5683 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5684 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5685 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5687 coal->tx_coalesce_usecs = bp->tx_ticks;
5688 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5689 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5690 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5692 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5698 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5700 struct bnx2 *bp = netdev_priv(dev);
5702 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5703 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5705 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5706 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5708 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5709 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5711 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5712 if (bp->rx_quick_cons_trip_int > 0xff)
5713 bp->rx_quick_cons_trip_int = 0xff;
5715 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5716 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5718 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5719 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5721 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5722 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5724 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5725 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5728 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5729 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5730 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5731 bp->stats_ticks = USEC_PER_SEC;
5733 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5734 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5735 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5737 if (netif_running(bp->dev)) {
5738 bnx2_netif_stop(bp);
5740 bnx2_netif_start(bp);
5747 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5749 struct bnx2 *bp = netdev_priv(dev);
5751 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5752 ering->rx_mini_max_pending = 0;
5753 ering->rx_jumbo_max_pending = 0;
5755 ering->rx_pending = bp->rx_ring_size;
5756 ering->rx_mini_pending = 0;
5757 ering->rx_jumbo_pending = 0;
5759 ering->tx_max_pending = MAX_TX_DESC_CNT;
5760 ering->tx_pending = bp->tx_ring_size;
5764 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5766 struct bnx2 *bp = netdev_priv(dev);
5768 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5769 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5770 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5774 if (netif_running(bp->dev)) {
5775 bnx2_netif_stop(bp);
5776 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5781 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5782 bp->tx_ring_size = ering->tx_pending;
5784 if (netif_running(bp->dev)) {
5787 rc = bnx2_alloc_mem(bp);
5791 bnx2_netif_start(bp);
5798 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5800 struct bnx2 *bp = netdev_priv(dev);
5802 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5803 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5804 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5808 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5810 struct bnx2 *bp = netdev_priv(dev);
5812 bp->req_flow_ctrl = 0;
5813 if (epause->rx_pause)
5814 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5815 if (epause->tx_pause)
5816 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5818 if (epause->autoneg) {
5819 bp->autoneg |= AUTONEG_FLOW_CTRL;
5822 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5825 spin_lock_bh(&bp->phy_lock);
5827 bnx2_setup_phy(bp, bp->phy_port);
5829 spin_unlock_bh(&bp->phy_lock);
5835 bnx2_get_rx_csum(struct net_device *dev)
5837 struct bnx2 *bp = netdev_priv(dev);
5843 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5845 struct bnx2 *bp = netdev_priv(dev);
5852 bnx2_set_tso(struct net_device *dev, u32 data)
5854 struct bnx2 *bp = netdev_priv(dev);
5857 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5858 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5859 dev->features |= NETIF_F_TSO6;
5861 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5866 #define BNX2_NUM_STATS 46
5869 char string[ETH_GSTRING_LEN];
5870 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5872 { "rx_error_bytes" },
5874 { "tx_error_bytes" },
5875 { "rx_ucast_packets" },
5876 { "rx_mcast_packets" },
5877 { "rx_bcast_packets" },
5878 { "tx_ucast_packets" },
5879 { "tx_mcast_packets" },
5880 { "tx_bcast_packets" },
5881 { "tx_mac_errors" },
5882 { "tx_carrier_errors" },
5883 { "rx_crc_errors" },
5884 { "rx_align_errors" },
5885 { "tx_single_collisions" },
5886 { "tx_multi_collisions" },
5888 { "tx_excess_collisions" },
5889 { "tx_late_collisions" },
5890 { "tx_total_collisions" },
5893 { "rx_undersize_packets" },
5894 { "rx_oversize_packets" },
5895 { "rx_64_byte_packets" },
5896 { "rx_65_to_127_byte_packets" },
5897 { "rx_128_to_255_byte_packets" },
5898 { "rx_256_to_511_byte_packets" },
5899 { "rx_512_to_1023_byte_packets" },
5900 { "rx_1024_to_1522_byte_packets" },
5901 { "rx_1523_to_9022_byte_packets" },
5902 { "tx_64_byte_packets" },
5903 { "tx_65_to_127_byte_packets" },
5904 { "tx_128_to_255_byte_packets" },
5905 { "tx_256_to_511_byte_packets" },
5906 { "tx_512_to_1023_byte_packets" },
5907 { "tx_1024_to_1522_byte_packets" },
5908 { "tx_1523_to_9022_byte_packets" },
5909 { "rx_xon_frames" },
5910 { "rx_xoff_frames" },
5911 { "tx_xon_frames" },
5912 { "tx_xoff_frames" },
5913 { "rx_mac_ctrl_frames" },
5914 { "rx_filtered_packets" },
5916 { "rx_fw_discards" },
5919 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5921 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5922 STATS_OFFSET32(stat_IfHCInOctets_hi),
5923 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5924 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5925 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5926 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5927 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5928 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5929 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5930 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5931 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5932 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5933 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5934 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5935 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5936 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5937 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5938 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5939 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5940 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5941 STATS_OFFSET32(stat_EtherStatsCollisions),
5942 STATS_OFFSET32(stat_EtherStatsFragments),
5943 STATS_OFFSET32(stat_EtherStatsJabbers),
5944 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5945 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5946 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5947 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5948 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5949 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5950 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5951 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5952 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5953 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5954 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5955 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5956 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5957 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5958 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5959 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5960 STATS_OFFSET32(stat_XonPauseFramesReceived),
5961 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5962 STATS_OFFSET32(stat_OutXonSent),
5963 STATS_OFFSET32(stat_OutXoffSent),
5964 STATS_OFFSET32(stat_MacControlFramesReceived),
5965 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5966 STATS_OFFSET32(stat_IfInMBUFDiscards),
5967 STATS_OFFSET32(stat_FwRxDrop),
5970 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5971 * skipped because of errata.
5973 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5974 8,0,8,8,8,8,8,8,8,8,
5975 4,0,4,4,4,4,4,4,4,4,
5976 4,4,4,4,4,4,4,4,4,4,
5977 4,4,4,4,4,4,4,4,4,4,
5981 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5982 8,0,8,8,8,8,8,8,8,8,
5983 4,4,4,4,4,4,4,4,4,4,
5984 4,4,4,4,4,4,4,4,4,4,
5985 4,4,4,4,4,4,4,4,4,4,
5989 #define BNX2_NUM_TESTS 6
5992 char string[ETH_GSTRING_LEN];
5993 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5994 { "register_test (offline)" },
5995 { "memory_test (offline)" },
5996 { "loopback_test (offline)" },
5997 { "nvram_test (online)" },
5998 { "interrupt_test (online)" },
5999 { "link_test (online)" },
6003 bnx2_get_sset_count(struct net_device *dev, int sset)
6007 return BNX2_NUM_TESTS;
6009 return BNX2_NUM_STATS;
6016 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6018 struct bnx2 *bp = netdev_priv(dev);
6020 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6021 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6024 bnx2_netif_stop(bp);
6025 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6028 if (bnx2_test_registers(bp) != 0) {
6030 etest->flags |= ETH_TEST_FL_FAILED;
6032 if (bnx2_test_memory(bp) != 0) {
6034 etest->flags |= ETH_TEST_FL_FAILED;
6036 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6037 etest->flags |= ETH_TEST_FL_FAILED;
6039 if (!netif_running(bp->dev)) {
6040 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6044 bnx2_netif_start(bp);
6047 /* wait for link up */
6048 for (i = 0; i < 7; i++) {
6051 msleep_interruptible(1000);
6055 if (bnx2_test_nvram(bp) != 0) {
6057 etest->flags |= ETH_TEST_FL_FAILED;
6059 if (bnx2_test_intr(bp) != 0) {
6061 etest->flags |= ETH_TEST_FL_FAILED;
6064 if (bnx2_test_link(bp) != 0) {
6066 etest->flags |= ETH_TEST_FL_FAILED;
6072 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6074 switch (stringset) {
6076 memcpy(buf, bnx2_stats_str_arr,
6077 sizeof(bnx2_stats_str_arr));
6080 memcpy(buf, bnx2_tests_str_arr,
6081 sizeof(bnx2_tests_str_arr));
6087 bnx2_get_ethtool_stats(struct net_device *dev,
6088 struct ethtool_stats *stats, u64 *buf)
6090 struct bnx2 *bp = netdev_priv(dev);
6092 u32 *hw_stats = (u32 *) bp->stats_blk;
6093 u8 *stats_len_arr = NULL;
6095 if (hw_stats == NULL) {
6096 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6100 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6101 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6102 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6103 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6104 stats_len_arr = bnx2_5706_stats_len_arr;
6106 stats_len_arr = bnx2_5708_stats_len_arr;
6108 for (i = 0; i < BNX2_NUM_STATS; i++) {
6109 if (stats_len_arr[i] == 0) {
6110 /* skip this counter */
6114 if (stats_len_arr[i] == 4) {
6115 /* 4-byte counter */
6117 *(hw_stats + bnx2_stats_offset_arr[i]);
6120 /* 8-byte counter */
6121 buf[i] = (((u64) *(hw_stats +
6122 bnx2_stats_offset_arr[i])) << 32) +
6123 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6128 bnx2_phys_id(struct net_device *dev, u32 data)
6130 struct bnx2 *bp = netdev_priv(dev);
6137 save = REG_RD(bp, BNX2_MISC_CFG);
6138 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6140 for (i = 0; i < (data * 2); i++) {
6142 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6145 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6146 BNX2_EMAC_LED_1000MB_OVERRIDE |
6147 BNX2_EMAC_LED_100MB_OVERRIDE |
6148 BNX2_EMAC_LED_10MB_OVERRIDE |
6149 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6150 BNX2_EMAC_LED_TRAFFIC);
6152 msleep_interruptible(500);
6153 if (signal_pending(current))
6156 REG_WR(bp, BNX2_EMAC_LED, 0);
6157 REG_WR(bp, BNX2_MISC_CFG, save);
6162 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6164 struct bnx2 *bp = netdev_priv(dev);
6166 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6167 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6169 return (ethtool_op_set_tx_csum(dev, data));
6172 static const struct ethtool_ops bnx2_ethtool_ops = {
6173 .get_settings = bnx2_get_settings,
6174 .set_settings = bnx2_set_settings,
6175 .get_drvinfo = bnx2_get_drvinfo,
6176 .get_regs_len = bnx2_get_regs_len,
6177 .get_regs = bnx2_get_regs,
6178 .get_wol = bnx2_get_wol,
6179 .set_wol = bnx2_set_wol,
6180 .nway_reset = bnx2_nway_reset,
6181 .get_link = ethtool_op_get_link,
6182 .get_eeprom_len = bnx2_get_eeprom_len,
6183 .get_eeprom = bnx2_get_eeprom,
6184 .set_eeprom = bnx2_set_eeprom,
6185 .get_coalesce = bnx2_get_coalesce,
6186 .set_coalesce = bnx2_set_coalesce,
6187 .get_ringparam = bnx2_get_ringparam,
6188 .set_ringparam = bnx2_set_ringparam,
6189 .get_pauseparam = bnx2_get_pauseparam,
6190 .set_pauseparam = bnx2_set_pauseparam,
6191 .get_rx_csum = bnx2_get_rx_csum,
6192 .set_rx_csum = bnx2_set_rx_csum,
6193 .set_tx_csum = bnx2_set_tx_csum,
6194 .set_sg = ethtool_op_set_sg,
6195 .set_tso = bnx2_set_tso,
6196 .self_test = bnx2_self_test,
6197 .get_strings = bnx2_get_strings,
6198 .phys_id = bnx2_phys_id,
6199 .get_ethtool_stats = bnx2_get_ethtool_stats,
6200 .get_sset_count = bnx2_get_sset_count,
6203 /* Called with rtnl_lock */
6205 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6207 struct mii_ioctl_data *data = if_mii(ifr);
6208 struct bnx2 *bp = netdev_priv(dev);
6213 data->phy_id = bp->phy_addr;
6219 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6222 if (!netif_running(dev))
6225 spin_lock_bh(&bp->phy_lock);
6226 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6227 spin_unlock_bh(&bp->phy_lock);
6229 data->val_out = mii_regval;
6235 if (!capable(CAP_NET_ADMIN))
6238 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6241 if (!netif_running(dev))
6244 spin_lock_bh(&bp->phy_lock);
6245 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6246 spin_unlock_bh(&bp->phy_lock);
6257 /* Called with rtnl_lock */
6259 bnx2_change_mac_addr(struct net_device *dev, void *p)
6261 struct sockaddr *addr = p;
6262 struct bnx2 *bp = netdev_priv(dev);
6264 if (!is_valid_ether_addr(addr->sa_data))
6267 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6268 if (netif_running(dev))
6269 bnx2_set_mac_addr(bp);
6274 /* Called with rtnl_lock */
6276 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6278 struct bnx2 *bp = netdev_priv(dev);
6280 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6281 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6285 if (netif_running(dev)) {
6286 bnx2_netif_stop(bp);
6290 bnx2_netif_start(bp);
6295 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6297 poll_bnx2(struct net_device *dev)
6299 struct bnx2 *bp = netdev_priv(dev);
6301 disable_irq(bp->pdev->irq);
6302 bnx2_interrupt(bp->pdev->irq, dev);
6303 enable_irq(bp->pdev->irq);
6307 static void __devinit
6308 bnx2_get_5709_media(struct bnx2 *bp)
6310 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6311 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6314 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6316 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6317 bp->phy_flags |= PHY_SERDES_FLAG;
6321 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6322 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6324 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6326 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6331 bp->phy_flags |= PHY_SERDES_FLAG;
6339 bp->phy_flags |= PHY_SERDES_FLAG;
6345 static void __devinit
6346 bnx2_get_pci_speed(struct bnx2 *bp)
6350 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6351 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6354 bp->flags |= PCIX_FLAG;
6356 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6358 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6360 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6361 bp->bus_speed_mhz = 133;
6364 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6365 bp->bus_speed_mhz = 100;
6368 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6369 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6370 bp->bus_speed_mhz = 66;
6373 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6374 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6375 bp->bus_speed_mhz = 50;
6378 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6379 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6380 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6381 bp->bus_speed_mhz = 33;
6386 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6387 bp->bus_speed_mhz = 66;
6389 bp->bus_speed_mhz = 33;
6392 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6393 bp->flags |= PCI_32BIT_FLAG;
6397 static int __devinit
6398 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6401 unsigned long mem_len;
6404 u64 dma_mask, persist_dma_mask;
6406 SET_NETDEV_DEV(dev, &pdev->dev);
6407 bp = netdev_priv(dev);
6412 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6413 rc = pci_enable_device(pdev);
6415 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6419 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6421 "Cannot find PCI device base address, aborting.\n");
6423 goto err_out_disable;
6426 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6428 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6429 goto err_out_disable;
6432 pci_set_master(pdev);
6434 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6435 if (bp->pm_cap == 0) {
6437 "Cannot find power management capability, aborting.\n");
6439 goto err_out_release;
6445 spin_lock_init(&bp->phy_lock);
6446 spin_lock_init(&bp->indirect_lock);
6447 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6449 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6450 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6451 dev->mem_end = dev->mem_start + mem_len;
6452 dev->irq = pdev->irq;
6454 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6457 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6459 goto err_out_release;
6462 /* Configure byte swap and enable write to the reg_window registers.
6463 * Rely on CPU to do target byte swapping on big endian systems
6464 * The chip's target access swapping will not swap all accesses
6466 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6467 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6468 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6470 bnx2_set_power_state(bp, PCI_D0);
6472 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6474 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6475 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6477 "Cannot find PCIE capability, aborting.\n");
6481 bp->flags |= PCIE_FLAG;
6483 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6484 if (bp->pcix_cap == 0) {
6486 "Cannot find PCIX capability, aborting.\n");
6492 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6493 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6494 bp->flags |= MSI_CAP_FLAG;
6497 /* 5708 cannot support DMA addresses > 40-bit. */
6498 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6499 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6501 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6503 /* Configure DMA attributes. */
6504 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6505 dev->features |= NETIF_F_HIGHDMA;
6506 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6509 "pci_set_consistent_dma_mask failed, aborting.\n");
6512 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6513 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6517 if (!(bp->flags & PCIE_FLAG))
6518 bnx2_get_pci_speed(bp);
6520 /* 5706A0 may falsely detect SERR and PERR. */
6521 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6522 reg = REG_RD(bp, PCI_COMMAND);
6523 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6524 REG_WR(bp, PCI_COMMAND, reg);
6526 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6527 !(bp->flags & PCIX_FLAG)) {
6530 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6534 bnx2_init_nvram(bp);
6536 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6538 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6539 BNX2_SHM_HDR_SIGNATURE_SIG) {
6540 u32 off = PCI_FUNC(pdev->devfn) << 2;
6542 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6544 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6546 /* Get the permanent MAC address. First we need to make sure the
6547 * firmware is actually running.
6549 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6551 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6552 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6553 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6558 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6559 for (i = 0, j = 0; i < 3; i++) {
6562 num = (u8) (reg >> (24 - (i * 8)));
6563 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6564 if (num >= k || !skip0 || k == 1) {
6565 bp->fw_version[j++] = (num / k) + '0';
6570 bp->fw_version[j++] = '.';
6572 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
6573 BNX2_PORT_FEATURE_ASF_ENABLED) {
6574 bp->flags |= ASF_ENABLE_FLAG;
6576 for (i = 0; i < 30; i++) {
6577 reg = REG_RD_IND(bp, bp->shmem_base +
6578 BNX2_BC_STATE_CONDITION);
6579 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6584 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6585 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6586 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6587 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6589 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6591 bp->fw_version[j++] = ' ';
6592 for (i = 0; i < 3; i++) {
6593 reg = REG_RD_IND(bp, addr + i * 4);
6595 memcpy(&bp->fw_version[j], ®, 4);
6600 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6601 bp->mac_addr[0] = (u8) (reg >> 8);
6602 bp->mac_addr[1] = (u8) reg;
6604 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6605 bp->mac_addr[2] = (u8) (reg >> 24);
6606 bp->mac_addr[3] = (u8) (reg >> 16);
6607 bp->mac_addr[4] = (u8) (reg >> 8);
6608 bp->mac_addr[5] = (u8) reg;
6610 bp->tx_ring_size = MAX_TX_DESC_CNT;
6611 bnx2_set_rx_ring_size(bp, 255);
6615 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6617 bp->tx_quick_cons_trip_int = 20;
6618 bp->tx_quick_cons_trip = 20;
6619 bp->tx_ticks_int = 80;
6622 bp->rx_quick_cons_trip_int = 6;
6623 bp->rx_quick_cons_trip = 6;
6624 bp->rx_ticks_int = 18;
6627 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6629 bp->timer_interval = HZ;
6630 bp->current_interval = HZ;
6634 /* Disable WOL support if we are running on a SERDES chip. */
6635 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6636 bnx2_get_5709_media(bp);
6637 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6638 bp->phy_flags |= PHY_SERDES_FLAG;
6640 bp->phy_port = PORT_TP;
6641 if (bp->phy_flags & PHY_SERDES_FLAG) {
6642 bp->phy_port = PORT_FIBRE;
6643 bp->flags |= NO_WOL_FLAG;
6644 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6646 reg = REG_RD_IND(bp, bp->shmem_base +
6647 BNX2_SHARED_HW_CFG_CONFIG);
6648 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6649 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6651 bnx2_init_remote_phy(bp);
6653 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6654 CHIP_NUM(bp) == CHIP_NUM_5708)
6655 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6656 else if (CHIP_ID(bp) == CHIP_ID_5709_A0 ||
6657 CHIP_ID(bp) == CHIP_ID_5709_A1)
6658 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6660 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6661 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6662 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6663 bp->flags |= NO_WOL_FLAG;
6665 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6666 bp->tx_quick_cons_trip_int =
6667 bp->tx_quick_cons_trip;
6668 bp->tx_ticks_int = bp->tx_ticks;
6669 bp->rx_quick_cons_trip_int =
6670 bp->rx_quick_cons_trip;
6671 bp->rx_ticks_int = bp->rx_ticks;
6672 bp->comp_prod_trip_int = bp->comp_prod_trip;
6673 bp->com_ticks_int = bp->com_ticks;
6674 bp->cmd_ticks_int = bp->cmd_ticks;
6677 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6679 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6680 * with byte enables disabled on the unused 32-bit word. This is legal
6681 * but causes problems on the AMD 8132 which will eventually stop
6682 * responding after a while.
6684 * AMD believes this incompatibility is unique to the 5706, and
6685 * prefers to locally disable MSI rather than globally disabling it.
6687 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6688 struct pci_dev *amd_8132 = NULL;
6690 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6691 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6694 if (amd_8132->revision >= 0x10 &&
6695 amd_8132->revision <= 0x13) {
6697 pci_dev_put(amd_8132);
6703 bnx2_set_default_link(bp);
6704 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6706 init_timer(&bp->timer);
6707 bp->timer.expires = RUN_AT(bp->timer_interval);
6708 bp->timer.data = (unsigned long) bp;
6709 bp->timer.function = bnx2_timer;
6715 iounmap(bp->regview);
6720 pci_release_regions(pdev);
6723 pci_disable_device(pdev);
6724 pci_set_drvdata(pdev, NULL);
6730 static char * __devinit
6731 bnx2_bus_string(struct bnx2 *bp, char *str)
6735 if (bp->flags & PCIE_FLAG) {
6736 s += sprintf(s, "PCI Express");
6738 s += sprintf(s, "PCI");
6739 if (bp->flags & PCIX_FLAG)
6740 s += sprintf(s, "-X");
6741 if (bp->flags & PCI_32BIT_FLAG)
6742 s += sprintf(s, " 32-bit");
6744 s += sprintf(s, " 64-bit");
6745 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6750 static int __devinit
6751 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6753 static int version_printed = 0;
6754 struct net_device *dev = NULL;
6758 DECLARE_MAC_BUF(mac);
6760 if (version_printed++ == 0)
6761 printk(KERN_INFO "%s", version);
6763 /* dev zeroed in init_etherdev */
6764 dev = alloc_etherdev(sizeof(*bp));
6769 rc = bnx2_init_board(pdev, dev);
6775 dev->open = bnx2_open;
6776 dev->hard_start_xmit = bnx2_start_xmit;
6777 dev->stop = bnx2_close;
6778 dev->get_stats = bnx2_get_stats;
6779 dev->set_multicast_list = bnx2_set_rx_mode;
6780 dev->do_ioctl = bnx2_ioctl;
6781 dev->set_mac_address = bnx2_change_mac_addr;
6782 dev->change_mtu = bnx2_change_mtu;
6783 dev->tx_timeout = bnx2_tx_timeout;
6784 dev->watchdog_timeo = TX_TIMEOUT;
6786 dev->vlan_rx_register = bnx2_vlan_rx_register;
6788 dev->ethtool_ops = &bnx2_ethtool_ops;
6790 bp = netdev_priv(dev);
6791 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
6793 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6794 dev->poll_controller = poll_bnx2;
6797 pci_set_drvdata(pdev, dev);
6799 memcpy(dev->dev_addr, bp->mac_addr, 6);
6800 memcpy(dev->perm_addr, bp->mac_addr, 6);
6801 bp->name = board_info[ent->driver_data].name;
6803 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6804 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6805 dev->features |= NETIF_F_IPV6_CSUM;
6808 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6810 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6811 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6812 dev->features |= NETIF_F_TSO6;
6814 if ((rc = register_netdev(dev))) {
6815 dev_err(&pdev->dev, "Cannot register net device\n");
6817 iounmap(bp->regview);
6818 pci_release_regions(pdev);
6819 pci_disable_device(pdev);
6820 pci_set_drvdata(pdev, NULL);
6825 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6826 "IRQ %d, node addr %s\n",
6829 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6830 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6831 bnx2_bus_string(bp, str),
6833 bp->pdev->irq, print_mac(mac, dev->dev_addr));
6838 static void __devexit
6839 bnx2_remove_one(struct pci_dev *pdev)
6841 struct net_device *dev = pci_get_drvdata(pdev);
6842 struct bnx2 *bp = netdev_priv(dev);
6844 flush_scheduled_work();
6846 unregister_netdev(dev);
6849 iounmap(bp->regview);
6852 pci_release_regions(pdev);
6853 pci_disable_device(pdev);
6854 pci_set_drvdata(pdev, NULL);
6858 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6860 struct net_device *dev = pci_get_drvdata(pdev);
6861 struct bnx2 *bp = netdev_priv(dev);
6864 /* PCI register 4 needs to be saved whether netif_running() or not.
6865 * MSI address and data need to be saved if using MSI and
6868 pci_save_state(pdev);
6869 if (!netif_running(dev))
6872 flush_scheduled_work();
6873 bnx2_netif_stop(bp);
6874 netif_device_detach(dev);
6875 del_timer_sync(&bp->timer);
6876 if (bp->flags & NO_WOL_FLAG)
6877 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6879 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6881 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6882 bnx2_reset_chip(bp, reset_code);
6884 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6889 bnx2_resume(struct pci_dev *pdev)
6891 struct net_device *dev = pci_get_drvdata(pdev);
6892 struct bnx2 *bp = netdev_priv(dev);
6894 pci_restore_state(pdev);
6895 if (!netif_running(dev))
6898 bnx2_set_power_state(bp, PCI_D0);
6899 netif_device_attach(dev);
6901 bnx2_netif_start(bp);
6905 static struct pci_driver bnx2_pci_driver = {
6906 .name = DRV_MODULE_NAME,
6907 .id_table = bnx2_pci_tbl,
6908 .probe = bnx2_init_one,
6909 .remove = __devexit_p(bnx2_remove_one),
6910 .suspend = bnx2_suspend,
6911 .resume = bnx2_resume,
6914 static int __init bnx2_init(void)
6916 return pci_register_driver(&bnx2_pci_driver);
6919 static void __exit bnx2_cleanup(void)
6921 pci_unregister_driver(&bnx2_pci_driver);
6924 module_init(bnx2_init);
6925 module_exit(bnx2_cleanup);