1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.6.5"
58 #define DRV_MODULE_RELDATE "September 20, 2007"
60 #define RUN_AT(x) (jiffies + (x))
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
65 static const char version[] __devinitdata =
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int disable_msi = 0;
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
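/* Usage note (hypothetical invocation, not part of this file): loading the
 * module with "modprobe bnx2 disable_msi=1" forces legacy INTx interrupts,
 * which can help on platforms with broken MSI routing.
 */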
90 /* indexed by board_t, above */
91 static struct {
92 char *name;
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105 static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
124 { 0, }
125 };
127 static struct flash_spec flash_table[] =
128 {
129 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
130 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
131 /* Slow EEPROM */
132 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
133 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
134 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
135 "EEPROM - slow"},
136 /* Expansion entry 0001 */
137 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
138 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
139 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
140 "Entry 0001"},
141 /* Saifun SA25F010 (non-buffered flash) */
142 /* strap, cfg1, & write1 need updates */
143 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
144 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
145 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
146 "Non-buffered flash (128kB)"},
147 /* Saifun SA25F020 (non-buffered flash) */
148 /* strap, cfg1, & write1 need updates */
149 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
150 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
151 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
152 "Non-buffered flash (256kB)"},
153 /* Expansion entry 0100 */
154 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
155 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
156 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
157 "Entry 0100"},
158 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
159 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
160 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
161 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
162 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
163 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
164 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
165 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
166 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
167 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
168 /* Saifun SA25F005 (non-buffered flash) */
169 /* strap, cfg1, & write1 need updates */
170 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
171 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
172 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
173 "Non-buffered flash (64kB)"},
174 /* Fast EEPROM */
175 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
176 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
177 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
178 "EEPROM - fast"},
179 /* Expansion entry 1001 */
180 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
181 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
182 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
183 "Entry 1001"},
184 /* Expansion entry 1010 */
185 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
186 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
187 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
188 "Entry 1010"},
189 /* ATMEL AT45DB011B (buffered flash) */
190 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
191 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
192 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
193 "Buffered flash (128kB)"},
194 /* Expansion entry 1100 */
195 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
196 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
197 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
198 "Entry 1100"},
199 /* Expansion entry 1101 */
200 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
201 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
202 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
203 "Entry 1101"},
204 /* Atmel Expansion entry 1110 */
205 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
206 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
207 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
208 "Entry 1110 (Atmel)"},
209 /* ATMEL AT45DB021B (buffered flash) */
210 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
211 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
212 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
213 "Buffered flash (256kB)"},
216 static struct flash_spec flash_5709 = {
217 .flags = BNX2_NV_BUFFERED,
218 .page_bits = BCM5709_FLASH_PAGE_BITS,
219 .page_size = BCM5709_FLASH_PAGE_SIZE,
220 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
221 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
222 .name = "5709 Buffered flash (256kB)",
223 };
225 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
227 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
233 /* The ring uses 256 indices for 255 entries, one of them
234 * needs to be skipped.
235 */
236 diff = bp->tx_prod - bp->tx_cons;
237 if (unlikely(diff >= TX_DESC_CNT)) {
238 diff &= 0xffff;
239 if (diff == TX_DESC_CNT)
240 diff = MAX_TX_DESC_CNT;
241 }
242 return (bp->tx_ring_size - diff);
243 }
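/* Worked example of the arithmetic above (hypothetical values): with
 * tx_prod = 2 and tx_cons = 0xfffe, the 16-bit wrap-around yields
 * diff = 4 after masking with 0xffff. The diff == TX_DESC_CNT case maps a
 * completely full ring back to MAX_TX_DESC_CNT because the last BD of each
 * ring page is a link entry and never carries a packet.
 */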
246 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
250 spin_lock_bh(&bp->indirect_lock);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
252 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
253 spin_unlock_bh(&bp->indirect_lock);
258 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
260 spin_lock_bh(&bp->indirect_lock);
261 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
262 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
263 spin_unlock_bh(&bp->indirect_lock);
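/* Both helpers above use the PCI configuration "register window": the
 * target offset goes into BNX2_PCICFG_REG_WINDOW_ADDRESS, then data moves
 * through BNX2_PCICFG_REG_WINDOW. indirect_lock serializes the two-step
 * sequence so concurrent callers cannot interleave address/data pairs.
 */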
267 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 spin_lock_bh(&bp->indirect_lock);
271 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
275 REG_WR(bp, BNX2_CTX_CTX_CTRL,
276 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
277 for (i = 0; i < 5; i++) {
279 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
280 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
281 break;
282 udelay(5);
283 }
284 } else {
285 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
286 REG_WR(bp, BNX2_CTX_DATA, val);
288 spin_unlock_bh(&bp->indirect_lock);
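/* Context writes take two forms: the 5709 posts the value through
 * BNX2_CTX_CTX_DATA/BNX2_CTX_CTX_CTRL and polls for the WRITE_REQ bit to
 * clear, while older chips write directly through BNX2_CTX_DATA_ADR and
 * BNX2_CTX_DATA with no completion handshake.
 */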
292 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
297 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
298 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
299 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
301 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
302 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
307 val1 = (bp->phy_addr << 21) | (reg << 16) |
308 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
309 BNX2_EMAC_MDIO_COMM_START_BUSY;
310 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
312 for (i = 0; i < 50; i++) {
313 udelay(10);
315 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
316 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
317 udelay(5);
319 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
320 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
326 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
327 *val = 0x0;
328 ret = -EBUSY;
329 }
330 else {
331 *val = val1;
332 ret = 0;
333 }
335 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
336 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
337 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
339 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
340 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
341 }
343 return ret;
344 }
349 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
354 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
355 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
356 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
358 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
359 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
364 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
365 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
366 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
367 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
369 for (i = 0; i < 50; i++) {
370 udelay(10);
372 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
373 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
374 udelay(5);
375 break;
376 }
377 }
379 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
380 ret = -EBUSY;
381 else
382 ret = 0;
384 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
385 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
386 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
388 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
389 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
390 }
392 return ret;
393 }
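/* The MDIO access pattern shared by bnx2_read_phy() and bnx2_write_phy():
 * temporarily turn off hardware auto-polling (it owns the MDIO bus
 * otherwise), post a clause-22 command with START_BUSY set, poll up to 50
 * times for START_BUSY to clear, then restore auto-polling. The dummy
 * REG_RD of BNX2_EMAC_MDIO_MODE flushes the posted write before delaying.
 */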
398 bnx2_disable_int(struct bnx2 *bp)
400 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
401 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
402 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
406 bnx2_enable_int(struct bnx2 *bp)
408 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
409 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
410 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
412 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
413 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
415 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
419 bnx2_disable_int_sync(struct bnx2 *bp)
421 atomic_inc(&bp->intr_sem);
422 bnx2_disable_int(bp);
423 synchronize_irq(bp->pdev->irq);
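/* Quiesce pattern: bumping intr_sem makes the ISRs bail out without
 * scheduling NAPI, masking the interrupt stops new ones, and
 * synchronize_irq() waits out any handler still running on another CPU.
 */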
427 bnx2_netif_stop(struct bnx2 *bp)
429 bnx2_disable_int_sync(bp);
430 if (netif_running(bp->dev)) {
431 napi_disable(&bp->napi);
432 netif_tx_disable(bp->dev);
433 bp->dev->trans_start = jiffies; /* prevent tx timeout */
438 bnx2_netif_start(struct bnx2 *bp)
440 if (atomic_dec_and_test(&bp->intr_sem)) {
441 if (netif_running(bp->dev)) {
442 netif_wake_queue(bp->dev);
443 napi_enable(&bp->napi);
450 bnx2_free_mem(struct bnx2 *bp)
454 for (i = 0; i < bp->ctx_pages; i++) {
455 if (bp->ctx_blk[i]) {
456 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
457 bp->ctx_blk[i],
458 bp->ctx_blk_mapping[i]);
459 bp->ctx_blk[i] = NULL;
462 if (bp->status_blk) {
463 pci_free_consistent(bp->pdev, bp->status_stats_size,
464 bp->status_blk, bp->status_blk_mapping);
465 bp->status_blk = NULL;
466 bp->stats_blk = NULL;
468 if (bp->tx_desc_ring) {
469 pci_free_consistent(bp->pdev,
470 sizeof(struct tx_bd) * TX_DESC_CNT,
471 bp->tx_desc_ring, bp->tx_desc_mapping);
472 bp->tx_desc_ring = NULL;
474 kfree(bp->tx_buf_ring);
475 bp->tx_buf_ring = NULL;
476 for (i = 0; i < bp->rx_max_ring; i++) {
477 if (bp->rx_desc_ring[i])
478 pci_free_consistent(bp->pdev,
479 sizeof(struct rx_bd) * RX_DESC_CNT,
480 bp->rx_desc_ring[i],
481 bp->rx_desc_mapping[i]);
482 bp->rx_desc_ring[i] = NULL;
484 vfree(bp->rx_buf_ring);
485 bp->rx_buf_ring = NULL;
489 bnx2_alloc_mem(struct bnx2 *bp)
491 int i, status_blk_size;
493 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
495 if (bp->tx_buf_ring == NULL)
498 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
499 sizeof(struct tx_bd) *
500 TX_DESC_CNT,
501 &bp->tx_desc_mapping);
502 if (bp->tx_desc_ring == NULL)
505 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
507 if (bp->rx_buf_ring == NULL)
510 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
511 bp->rx_max_ring);
513 for (i = 0; i < bp->rx_max_ring; i++) {
514 bp->rx_desc_ring[i] =
515 pci_alloc_consistent(bp->pdev,
516 sizeof(struct rx_bd) * RX_DESC_CNT,
517 &bp->rx_desc_mapping[i]);
518 if (bp->rx_desc_ring[i] == NULL)
523 /* Combine status and statistics blocks into one allocation. */
524 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
525 bp->status_stats_size = status_blk_size +
526 sizeof(struct statistics_block);
528 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
529 &bp->status_blk_mapping);
530 if (bp->status_blk == NULL)
533 memset(bp->status_blk, 0, bp->status_stats_size);
535 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
538 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
540 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
541 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
542 if (bp->ctx_pages == 0)
544 for (i = 0; i < bp->ctx_pages; i++) {
545 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
547 &bp->ctx_blk_mapping[i]);
548 if (bp->ctx_blk[i] == NULL)
560 bnx2_report_fw_link(struct bnx2 *bp)
562 u32 fw_link_status = 0;
564 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
570 switch (bp->line_speed) {
572 if (bp->duplex == DUPLEX_HALF)
573 fw_link_status = BNX2_LINK_STATUS_10HALF;
575 fw_link_status = BNX2_LINK_STATUS_10FULL;
578 if (bp->duplex == DUPLEX_HALF)
579 fw_link_status = BNX2_LINK_STATUS_100HALF;
581 fw_link_status = BNX2_LINK_STATUS_100FULL;
584 if (bp->duplex == DUPLEX_HALF)
585 fw_link_status = BNX2_LINK_STATUS_1000HALF;
587 fw_link_status = BNX2_LINK_STATUS_1000FULL;
590 if (bp->duplex == DUPLEX_HALF)
591 fw_link_status = BNX2_LINK_STATUS_2500HALF;
593 fw_link_status = BNX2_LINK_STATUS_2500FULL;
597 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
600 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
602 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
603 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
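/* BMSR is deliberately read twice: the link-status bit is latched low, so
 * the first read returns the stale latched value and the second reflects
 * the current state. The same idiom appears in the other link-polling
 * paths below.
 */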
605 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
606 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
607 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
609 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
613 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
615 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
619 bnx2_xceiver_str(struct bnx2 *bp)
621 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
622 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
627 bnx2_report_link(struct bnx2 *bp)
630 netif_carrier_on(bp->dev);
631 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
632 bnx2_xceiver_str(bp));
634 printk("%d Mbps ", bp->line_speed);
636 if (bp->duplex == DUPLEX_FULL)
637 printk("full duplex");
639 printk("half duplex");
642 if (bp->flow_ctrl & FLOW_CTRL_RX) {
643 printk(", receive ");
644 if (bp->flow_ctrl & FLOW_CTRL_TX)
645 printk("& transmit ");
648 printk(", transmit ");
650 printk("flow control ON");
655 netif_carrier_off(bp->dev);
656 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
657 bnx2_xceiver_str(bp));
660 bnx2_report_fw_link(bp);
664 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
666 u32 local_adv, remote_adv;
669 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
670 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
672 if (bp->duplex == DUPLEX_FULL) {
673 bp->flow_ctrl = bp->req_flow_ctrl;
678 if (bp->duplex != DUPLEX_FULL) {
682 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
683 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
686 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
687 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
688 bp->flow_ctrl |= FLOW_CTRL_TX;
689 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
690 bp->flow_ctrl |= FLOW_CTRL_RX;
694 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
695 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
697 if (bp->phy_flags & PHY_SERDES_FLAG) {
698 u32 new_local_adv = 0;
699 u32 new_remote_adv = 0;
701 if (local_adv & ADVERTISE_1000XPAUSE)
702 new_local_adv |= ADVERTISE_PAUSE_CAP;
703 if (local_adv & ADVERTISE_1000XPSE_ASYM)
704 new_local_adv |= ADVERTISE_PAUSE_ASYM;
705 if (remote_adv & ADVERTISE_1000XPAUSE)
706 new_remote_adv |= ADVERTISE_PAUSE_CAP;
707 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
708 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
710 local_adv = new_local_adv;
711 remote_adv = new_remote_adv;
714 /* See Table 28B-3 of 802.3ab-1999 spec. */
715 if (local_adv & ADVERTISE_PAUSE_CAP) {
716 if(local_adv & ADVERTISE_PAUSE_ASYM) {
717 if (remote_adv & ADVERTISE_PAUSE_CAP) {
718 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
720 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
721 bp->flow_ctrl = FLOW_CTRL_RX;
725 if (remote_adv & ADVERTISE_PAUSE_CAP) {
726 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
730 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
731 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
732 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
734 bp->flow_ctrl = FLOW_CTRL_TX;
740 bnx2_5709s_linkup(struct bnx2 *bp)
746 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
747 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
748 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
750 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
751 bp->line_speed = bp->req_line_speed;
752 bp->duplex = bp->req_duplex;
755 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
757 case MII_BNX2_GP_TOP_AN_SPEED_10:
758 bp->line_speed = SPEED_10;
760 case MII_BNX2_GP_TOP_AN_SPEED_100:
761 bp->line_speed = SPEED_100;
763 case MII_BNX2_GP_TOP_AN_SPEED_1G:
764 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
765 bp->line_speed = SPEED_1000;
767 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
768 bp->line_speed = SPEED_2500;
771 if (val & MII_BNX2_GP_TOP_AN_FD)
772 bp->duplex = DUPLEX_FULL;
774 bp->duplex = DUPLEX_HALF;
779 bnx2_5708s_linkup(struct bnx2 *bp)
784 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
785 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
786 case BCM5708S_1000X_STAT1_SPEED_10:
787 bp->line_speed = SPEED_10;
789 case BCM5708S_1000X_STAT1_SPEED_100:
790 bp->line_speed = SPEED_100;
792 case BCM5708S_1000X_STAT1_SPEED_1G:
793 bp->line_speed = SPEED_1000;
795 case BCM5708S_1000X_STAT1_SPEED_2G5:
796 bp->line_speed = SPEED_2500;
799 if (val & BCM5708S_1000X_STAT1_FD)
800 bp->duplex = DUPLEX_FULL;
802 bp->duplex = DUPLEX_HALF;
808 bnx2_5706s_linkup(struct bnx2 *bp)
810 u32 bmcr, local_adv, remote_adv, common;
813 bp->line_speed = SPEED_1000;
815 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
816 if (bmcr & BMCR_FULLDPLX) {
817 bp->duplex = DUPLEX_FULL;
820 bp->duplex = DUPLEX_HALF;
823 if (!(bmcr & BMCR_ANENABLE)) {
827 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
828 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
830 common = local_adv & remote_adv;
831 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
833 if (common & ADVERTISE_1000XFULL) {
834 bp->duplex = DUPLEX_FULL;
837 bp->duplex = DUPLEX_HALF;
845 bnx2_copper_linkup(struct bnx2 *bp)
849 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
850 if (bmcr & BMCR_ANENABLE) {
851 u32 local_adv, remote_adv, common;
853 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
854 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
856 common = local_adv & (remote_adv >> 2);
857 if (common & ADVERTISE_1000FULL) {
858 bp->line_speed = SPEED_1000;
859 bp->duplex = DUPLEX_FULL;
861 else if (common & ADVERTISE_1000HALF) {
862 bp->line_speed = SPEED_1000;
863 bp->duplex = DUPLEX_HALF;
866 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
867 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
869 common = local_adv & remote_adv;
870 if (common & ADVERTISE_100FULL) {
871 bp->line_speed = SPEED_100;
872 bp->duplex = DUPLEX_FULL;
874 else if (common & ADVERTISE_100HALF) {
875 bp->line_speed = SPEED_100;
876 bp->duplex = DUPLEX_HALF;
878 else if (common & ADVERTISE_10FULL) {
879 bp->line_speed = SPEED_10;
880 bp->duplex = DUPLEX_FULL;
882 else if (common & ADVERTISE_10HALF) {
883 bp->line_speed = SPEED_10;
884 bp->duplex = DUPLEX_HALF;
893 if (bmcr & BMCR_SPEED100) {
894 bp->line_speed = SPEED_100;
897 bp->line_speed = SPEED_10;
899 if (bmcr & BMCR_FULLDPLX) {
900 bp->duplex = DUPLEX_FULL;
903 bp->duplex = DUPLEX_HALF;
911 bnx2_set_mac_link(struct bnx2 *bp)
915 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
916 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
917 (bp->duplex == DUPLEX_HALF)) {
918 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
921 /* Configure the EMAC mode register. */
922 val = REG_RD(bp, BNX2_EMAC_MODE);
924 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
925 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
926 BNX2_EMAC_MODE_25G_MODE);
929 switch (bp->line_speed) {
931 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
932 val |= BNX2_EMAC_MODE_PORT_MII_10M;
937 val |= BNX2_EMAC_MODE_PORT_MII;
940 val |= BNX2_EMAC_MODE_25G_MODE;
943 val |= BNX2_EMAC_MODE_PORT_GMII;
948 val |= BNX2_EMAC_MODE_PORT_GMII;
951 /* Set the MAC to operate in the appropriate duplex mode. */
952 if (bp->duplex == DUPLEX_HALF)
953 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
954 REG_WR(bp, BNX2_EMAC_MODE, val);
956 /* Enable/disable rx PAUSE. */
957 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
959 if (bp->flow_ctrl & FLOW_CTRL_RX)
960 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
961 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
963 /* Enable/disable tx PAUSE. */
964 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
965 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
967 if (bp->flow_ctrl & FLOW_CTRL_TX)
968 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
969 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
971 /* Acknowledge the interrupt. */
972 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
978 bnx2_enable_bmsr1(struct bnx2 *bp)
980 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
981 (CHIP_NUM(bp) == CHIP_NUM_5709))
982 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
983 MII_BNX2_BLK_ADDR_GP_STATUS);
987 bnx2_disable_bmsr1(struct bnx2 *bp)
989 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
990 (CHIP_NUM(bp) == CHIP_NUM_5709))
991 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
992 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
996 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1001 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1004 if (bp->autoneg & AUTONEG_SPEED)
1005 bp->advertising |= ADVERTISED_2500baseX_Full;
1007 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1008 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1010 bnx2_read_phy(bp, bp->mii_up1, &up1);
1011 if (!(up1 & BCM5708S_UP1_2G5)) {
1012 up1 |= BCM5708S_UP1_2G5;
1013 bnx2_write_phy(bp, bp->mii_up1, up1);
1017 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1018 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1019 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1025 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1030 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1033 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1034 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1036 bnx2_read_phy(bp, bp->mii_up1, &up1);
1037 if (up1 & BCM5708S_UP1_2G5) {
1038 up1 &= ~BCM5708S_UP1_2G5;
1039 bnx2_write_phy(bp, bp->mii_up1, up1);
1043 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1044 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1051 bnx2_enable_forced_2g5(struct bnx2 *bp)
1055 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1058 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1061 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1062 MII_BNX2_BLK_ADDR_SERDES_DIG);
1063 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1064 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1065 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1066 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1068 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1069 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1070 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1072 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1073 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1074 bmcr |= BCM5708S_BMCR_FORCE_2500;
1077 if (bp->autoneg & AUTONEG_SPEED) {
1078 bmcr &= ~BMCR_ANENABLE;
1079 if (bp->req_duplex == DUPLEX_FULL)
1080 bmcr |= BMCR_FULLDPLX;
1082 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1086 bnx2_disable_forced_2g5(struct bnx2 *bp)
1090 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1093 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1096 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1097 MII_BNX2_BLK_ADDR_SERDES_DIG);
1098 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1099 val &= ~MII_BNX2_SD_MISC1_FORCE;
1100 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1102 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1103 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1104 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1106 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1107 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1108 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1111 if (bp->autoneg & AUTONEG_SPEED)
1112 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1113 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1117 bnx2_set_link(struct bnx2 *bp)
1122 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1127 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1130 link_up = bp->link_up;
1132 bnx2_enable_bmsr1(bp);
1133 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1134 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1135 bnx2_disable_bmsr1(bp);
1137 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1138 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1141 val = REG_RD(bp, BNX2_EMAC_STATUS);
1142 if (val & BNX2_EMAC_STATUS_LINK)
1143 bmsr |= BMSR_LSTATUS;
1145 bmsr &= ~BMSR_LSTATUS;
1148 if (bmsr & BMSR_LSTATUS) {
1149 bp->link_up = 1;
1151 if (bp->phy_flags & PHY_SERDES_FLAG) {
1152 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1153 bnx2_5706s_linkup(bp);
1154 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1155 bnx2_5708s_linkup(bp);
1156 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1157 bnx2_5709s_linkup(bp);
1160 bnx2_copper_linkup(bp);
1162 bnx2_resolve_flow_ctrl(bp);
1163 }
1164 else {
1165 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1166 (bp->autoneg & AUTONEG_SPEED))
1167 bnx2_disable_forced_2g5(bp);
1169 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1173 if (bp->link_up != link_up) {
1174 bnx2_report_link(bp);
1177 bnx2_set_mac_link(bp);
1183 bnx2_reset_phy(struct bnx2 *bp)
1188 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1190 #define PHY_RESET_MAX_WAIT 100
1191 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1192 udelay(10);
1194 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1195 if (!(reg & BMCR_RESET)) {
1196 udelay(20);
1197 break;
1198 }
1199 }
1200 if (i == PHY_RESET_MAX_WAIT) {
1201 return -EBUSY;
1202 }
1203 return 0;
1204 }
1207 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1211 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1212 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1214 if (bp->phy_flags & PHY_SERDES_FLAG) {
1215 adv = ADVERTISE_1000XPAUSE;
1218 adv = ADVERTISE_PAUSE_CAP;
1221 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1222 if (bp->phy_flags & PHY_SERDES_FLAG) {
1223 adv = ADVERTISE_1000XPSE_ASYM;
1226 adv = ADVERTISE_PAUSE_ASYM;
1229 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1230 if (bp->phy_flags & PHY_SERDES_FLAG) {
1231 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1234 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1240 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1243 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1245 u32 speed_arg = 0, pause_adv;
1247 pause_adv = bnx2_phy_get_pause_adv(bp);
1249 if (bp->autoneg & AUTONEG_SPEED) {
1250 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1251 if (bp->advertising & ADVERTISED_10baseT_Half)
1252 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1253 if (bp->advertising & ADVERTISED_10baseT_Full)
1254 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1255 if (bp->advertising & ADVERTISED_100baseT_Half)
1256 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1257 if (bp->advertising & ADVERTISED_100baseT_Full)
1258 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1259 if (bp->advertising & ADVERTISED_1000baseT_Full)
1260 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1261 if (bp->advertising & ADVERTISED_2500baseX_Full)
1262 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1264 if (bp->req_line_speed == SPEED_2500)
1265 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1266 else if (bp->req_line_speed == SPEED_1000)
1267 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1268 else if (bp->req_line_speed == SPEED_100) {
1269 if (bp->req_duplex == DUPLEX_FULL)
1270 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1272 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1273 } else if (bp->req_line_speed == SPEED_10) {
1274 if (bp->req_duplex == DUPLEX_FULL)
1275 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1277 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1281 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1282 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1283 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1284 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1286 if (port == PORT_TP)
1287 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1288 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1290 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1292 spin_unlock_bh(&bp->phy_lock);
1293 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1294 spin_lock_bh(&bp->phy_lock);
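/* phy_lock is dropped around bnx2_fw_sync() because the firmware
 * handshake sleeps while polling for an acknowledgement; sleeping with a
 * BH spinlock held would be illegal.
 */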
1300 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1305 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1306 return (bnx2_setup_remote_phy(bp, port));
1308 if (!(bp->autoneg & AUTONEG_SPEED)) {
1310 int force_link_down = 0;
1312 if (bp->req_line_speed == SPEED_2500) {
1313 if (!bnx2_test_and_enable_2g5(bp))
1314 force_link_down = 1;
1315 } else if (bp->req_line_speed == SPEED_1000) {
1316 if (bnx2_test_and_disable_2g5(bp))
1317 force_link_down = 1;
1319 bnx2_read_phy(bp, bp->mii_adv, &adv);
1320 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1322 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1323 new_bmcr = bmcr & ~BMCR_ANENABLE;
1324 new_bmcr |= BMCR_SPEED1000;
1326 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1327 if (bp->req_line_speed == SPEED_2500)
1328 bnx2_enable_forced_2g5(bp);
1329 else if (bp->req_line_speed == SPEED_1000) {
1330 bnx2_disable_forced_2g5(bp);
1331 new_bmcr &= ~0x2000;
1334 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1335 if (bp->req_line_speed == SPEED_2500)
1336 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1338 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1341 if (bp->req_duplex == DUPLEX_FULL) {
1342 adv |= ADVERTISE_1000XFULL;
1343 new_bmcr |= BMCR_FULLDPLX;
1346 adv |= ADVERTISE_1000XHALF;
1347 new_bmcr &= ~BMCR_FULLDPLX;
1349 if ((new_bmcr != bmcr) || (force_link_down)) {
1350 /* Force a link down visible on the other side */
1352 bnx2_write_phy(bp, bp->mii_adv, adv &
1353 ~(ADVERTISE_1000XFULL |
1354 ADVERTISE_1000XHALF));
1355 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1356 BMCR_ANRESTART | BMCR_ANENABLE);
1359 netif_carrier_off(bp->dev);
1360 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1361 bnx2_report_link(bp);
1363 bnx2_write_phy(bp, bp->mii_adv, adv);
1364 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1366 bnx2_resolve_flow_ctrl(bp);
1367 bnx2_set_mac_link(bp);
1372 bnx2_test_and_enable_2g5(bp);
1374 if (bp->advertising & ADVERTISED_1000baseT_Full)
1375 new_adv |= ADVERTISE_1000XFULL;
1377 new_adv |= bnx2_phy_get_pause_adv(bp);
1379 bnx2_read_phy(bp, bp->mii_adv, &adv);
1380 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1382 bp->serdes_an_pending = 0;
1383 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1384 /* Force a link down visible on the other side */
1386 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1387 spin_unlock_bh(&bp->phy_lock);
1389 spin_lock_bh(&bp->phy_lock);
1392 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1393 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1394 BMCR_ANENABLE);
1395 /* Speed up link-up time when the link partner
1396 * does not autonegotiate which is very common
1397 * in blade servers. Some blade servers use
1398 * IPMI for keyboard input and it's important
1399 * to minimize link disruptions. Autoneg. involves
1400 * exchanging base pages plus 3 next pages and
1401 * normally completes in about 120 msec.
1402 */
1403 bp->current_interval = SERDES_AN_TIMEOUT;
1404 bp->serdes_an_pending = 1;
1405 mod_timer(&bp->timer, jiffies + bp->current_interval);
1407 bnx2_resolve_flow_ctrl(bp);
1408 bnx2_set_mac_link(bp);
1414 #define ETHTOOL_ALL_FIBRE_SPEED \
1415 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1416 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1417 (ADVERTISED_1000baseT_Full)
1419 #define ETHTOOL_ALL_COPPER_SPEED \
1420 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1421 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1422 ADVERTISED_1000baseT_Full)
1424 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1425 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1427 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1430 bnx2_set_default_remote_link(struct bnx2 *bp)
1434 if (bp->phy_port == PORT_TP)
1435 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1437 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1439 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1440 bp->req_line_speed = 0;
1441 bp->autoneg |= AUTONEG_SPEED;
1442 bp->advertising = ADVERTISED_Autoneg;
1443 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1444 bp->advertising |= ADVERTISED_10baseT_Half;
1445 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1446 bp->advertising |= ADVERTISED_10baseT_Full;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1448 bp->advertising |= ADVERTISED_100baseT_Half;
1449 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1450 bp->advertising |= ADVERTISED_100baseT_Full;
1451 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1452 bp->advertising |= ADVERTISED_1000baseT_Full;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1454 bp->advertising |= ADVERTISED_2500baseX_Full;
1457 bp->advertising = 0;
1458 bp->req_duplex = DUPLEX_FULL;
1459 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1460 bp->req_line_speed = SPEED_10;
1461 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1462 bp->req_duplex = DUPLEX_HALF;
1464 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1465 bp->req_line_speed = SPEED_100;
1466 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1467 bp->req_duplex = DUPLEX_HALF;
1469 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1470 bp->req_line_speed = SPEED_1000;
1471 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1472 bp->req_line_speed = SPEED_2500;
1477 bnx2_set_default_link(struct bnx2 *bp)
1479 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1480 return bnx2_set_default_remote_link(bp);
1482 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1483 bp->req_line_speed = 0;
1484 if (bp->phy_flags & PHY_SERDES_FLAG) {
1487 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1489 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1490 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1491 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1493 bp->req_line_speed = bp->line_speed = SPEED_1000;
1494 bp->req_duplex = DUPLEX_FULL;
1497 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1501 bnx2_send_heart_beat(struct bnx2 *bp)
1506 spin_lock(&bp->indirect_lock);
1507 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1508 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1509 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1510 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1511 spin_unlock(&bp->indirect_lock);
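/* The pulse sequence written to BNX2_DRV_PULSE_MB is a driver keep-alive
 * that the bootcode watches; this path open-codes REG_WR_IND with the
 * non-_bh lock variant so it is safe to call from bottom-half context.
 */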
1515 bnx2_remote_phy_event(struct bnx2 *bp)
1518 u8 link_up = bp->link_up;
1521 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1523 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1524 bnx2_send_heart_beat(bp);
1526 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1528 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1534 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1535 bp->duplex = DUPLEX_FULL;
1537 case BNX2_LINK_STATUS_10HALF:
1538 bp->duplex = DUPLEX_HALF;
1539 case BNX2_LINK_STATUS_10FULL:
1540 bp->line_speed = SPEED_10;
1542 case BNX2_LINK_STATUS_100HALF:
1543 bp->duplex = DUPLEX_HALF;
1544 case BNX2_LINK_STATUS_100BASE_T4:
1545 case BNX2_LINK_STATUS_100FULL:
1546 bp->line_speed = SPEED_100;
1548 case BNX2_LINK_STATUS_1000HALF:
1549 bp->duplex = DUPLEX_HALF;
1550 case BNX2_LINK_STATUS_1000FULL:
1551 bp->line_speed = SPEED_1000;
1553 case BNX2_LINK_STATUS_2500HALF:
1554 bp->duplex = DUPLEX_HALF;
1555 case BNX2_LINK_STATUS_2500FULL:
1556 bp->line_speed = SPEED_2500;
1563 spin_lock(&bp->phy_lock);
1565 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1566 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1567 if (bp->duplex == DUPLEX_FULL)
1568 bp->flow_ctrl = bp->req_flow_ctrl;
1570 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1571 bp->flow_ctrl |= FLOW_CTRL_TX;
1572 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1573 bp->flow_ctrl |= FLOW_CTRL_RX;
1576 old_port = bp->phy_port;
1577 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1578 bp->phy_port = PORT_FIBRE;
1580 bp->phy_port = PORT_TP;
1582 if (old_port != bp->phy_port)
1583 bnx2_set_default_link(bp);
1585 spin_unlock(&bp->phy_lock);
1587 if (bp->link_up != link_up)
1588 bnx2_report_link(bp);
1590 bnx2_set_mac_link(bp);
1594 bnx2_set_remote_link(struct bnx2 *bp)
1598 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1600 case BNX2_FW_EVT_CODE_LINK_EVENT:
1601 bnx2_remote_phy_event(bp);
1603 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1605 bnx2_send_heart_beat(bp);
1612 bnx2_setup_copper_phy(struct bnx2 *bp)
1617 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1619 if (bp->autoneg & AUTONEG_SPEED) {
1620 u32 adv_reg, adv1000_reg;
1621 u32 new_adv_reg = 0;
1622 u32 new_adv1000_reg = 0;
1624 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1625 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1626 ADVERTISE_PAUSE_ASYM);
1628 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1629 adv1000_reg &= PHY_ALL_1000_SPEED;
1631 if (bp->advertising & ADVERTISED_10baseT_Half)
1632 new_adv_reg |= ADVERTISE_10HALF;
1633 if (bp->advertising & ADVERTISED_10baseT_Full)
1634 new_adv_reg |= ADVERTISE_10FULL;
1635 if (bp->advertising & ADVERTISED_100baseT_Half)
1636 new_adv_reg |= ADVERTISE_100HALF;
1637 if (bp->advertising & ADVERTISED_100baseT_Full)
1638 new_adv_reg |= ADVERTISE_100FULL;
1639 if (bp->advertising & ADVERTISED_1000baseT_Full)
1640 new_adv1000_reg |= ADVERTISE_1000FULL;
1642 new_adv_reg |= ADVERTISE_CSMA;
1644 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1646 if ((adv1000_reg != new_adv1000_reg) ||
1647 (adv_reg != new_adv_reg) ||
1648 ((bmcr & BMCR_ANENABLE) == 0)) {
1650 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1651 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1652 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1653 BMCR_ANENABLE);
1655 else if (bp->link_up) {
1656 /* Flow ctrl may have changed from auto to forced */
1657 /* or vice-versa. */
1659 bnx2_resolve_flow_ctrl(bp);
1660 bnx2_set_mac_link(bp);
1666 if (bp->req_line_speed == SPEED_100) {
1667 new_bmcr |= BMCR_SPEED100;
1669 if (bp->req_duplex == DUPLEX_FULL) {
1670 new_bmcr |= BMCR_FULLDPLX;
1672 if (new_bmcr != bmcr) {
1675 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1676 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1678 if (bmsr & BMSR_LSTATUS) {
1679 /* Force link down */
1680 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1681 spin_unlock_bh(&bp->phy_lock);
1683 spin_lock_bh(&bp->phy_lock);
1685 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1686 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1689 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1691 /* Normally, the new speed is setup after the link has
1692 * gone down and up again. In some cases, link will not go
1693 * down so we need to set up the new speed here.
1694 */
1695 if (bmsr & BMSR_LSTATUS) {
1696 bp->line_speed = bp->req_line_speed;
1697 bp->duplex = bp->req_duplex;
1698 bnx2_resolve_flow_ctrl(bp);
1699 bnx2_set_mac_link(bp);
1702 bnx2_resolve_flow_ctrl(bp);
1703 bnx2_set_mac_link(bp);
1709 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1711 if (bp->loopback == MAC_LOOPBACK)
1714 if (bp->phy_flags & PHY_SERDES_FLAG) {
1715 return (bnx2_setup_serdes_phy(bp, port));
1718 return (bnx2_setup_copper_phy(bp));
1723 bnx2_init_5709s_phy(struct bnx2 *bp)
1727 bp->mii_bmcr = MII_BMCR + 0x10;
1728 bp->mii_bmsr = MII_BMSR + 0x10;
1729 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1730 bp->mii_adv = MII_ADVERTISE + 0x10;
1731 bp->mii_lpa = MII_LPA + 0x10;
1732 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1734 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1735 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1737 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1740 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1742 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1743 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1744 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1745 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1747 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1748 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1749 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1750 val |= BCM5708S_UP1_2G5;
1752 val &= ~BCM5708S_UP1_2G5;
1753 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1755 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1756 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1757 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1758 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1760 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1762 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1763 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1764 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1766 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1772 bnx2_init_5708s_phy(struct bnx2 *bp)
1778 bp->mii_up1 = BCM5708S_UP1;
1780 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1781 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1782 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1784 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1785 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1786 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1788 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1789 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1790 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1792 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1793 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1794 val |= BCM5708S_UP1_2G5;
1795 bnx2_write_phy(bp, BCM5708S_UP1, val);
1798 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1799 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1800 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1801 /* increase tx signal amplitude */
1802 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1803 BCM5708S_BLK_ADDR_TX_MISC);
1804 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1805 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1806 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1807 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1810 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1811 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1816 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1817 BNX2_SHARED_HW_CFG_CONFIG);
1818 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1819 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1820 BCM5708S_BLK_ADDR_TX_MISC);
1821 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1822 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1823 BCM5708S_BLK_ADDR_DIG);
1830 bnx2_init_5706s_phy(struct bnx2 *bp)
1834 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1836 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1837 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1839 if (bp->dev->mtu > 1500) {
1842 /* Set extended packet length bit */
1843 bnx2_write_phy(bp, 0x18, 0x7);
1844 bnx2_read_phy(bp, 0x18, &val);
1845 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1847 bnx2_write_phy(bp, 0x1c, 0x6c00);
1848 bnx2_read_phy(bp, 0x1c, &val);
1849 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1854 bnx2_write_phy(bp, 0x18, 0x7);
1855 bnx2_read_phy(bp, 0x18, &val);
1856 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1858 bnx2_write_phy(bp, 0x1c, 0x6c00);
1859 bnx2_read_phy(bp, 0x1c, &val);
1860 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1867 bnx2_init_copper_phy(struct bnx2 *bp)
1873 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1874 bnx2_write_phy(bp, 0x18, 0x0c00);
1875 bnx2_write_phy(bp, 0x17, 0x000a);
1876 bnx2_write_phy(bp, 0x15, 0x310b);
1877 bnx2_write_phy(bp, 0x17, 0x201f);
1878 bnx2_write_phy(bp, 0x15, 0x9506);
1879 bnx2_write_phy(bp, 0x17, 0x401f);
1880 bnx2_write_phy(bp, 0x15, 0x14e2);
1881 bnx2_write_phy(bp, 0x18, 0x0400);
1884 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1885 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1886 MII_BNX2_DSP_EXPAND_REG | 0x8);
1887 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1889 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1892 if (bp->dev->mtu > 1500) {
1893 /* Set extended packet length bit */
1894 bnx2_write_phy(bp, 0x18, 0x7);
1895 bnx2_read_phy(bp, 0x18, &val);
1896 bnx2_write_phy(bp, 0x18, val | 0x4000);
1898 bnx2_read_phy(bp, 0x10, &val);
1899 bnx2_write_phy(bp, 0x10, val | 0x1);
1902 bnx2_write_phy(bp, 0x18, 0x7);
1903 bnx2_read_phy(bp, 0x18, &val);
1904 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1906 bnx2_read_phy(bp, 0x10, &val);
1907 bnx2_write_phy(bp, 0x10, val & ~0x1);
1910 /* ethernet@wirespeed */
1911 bnx2_write_phy(bp, 0x18, 0x7007);
1912 bnx2_read_phy(bp, 0x18, &val);
1913 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1919 bnx2_init_phy(struct bnx2 *bp)
1924 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1925 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1927 bp->mii_bmcr = MII_BMCR;
1928 bp->mii_bmsr = MII_BMSR;
1929 bp->mii_bmsr1 = MII_BMSR;
1930 bp->mii_adv = MII_ADVERTISE;
1931 bp->mii_lpa = MII_LPA;
1933 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1935 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1938 bnx2_read_phy(bp, MII_PHYSID1, &val);
1939 bp->phy_id = val << 16;
1940 bnx2_read_phy(bp, MII_PHYSID2, &val);
1941 bp->phy_id |= val & 0xffff;
1943 if (bp->phy_flags & PHY_SERDES_FLAG) {
1944 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1945 rc = bnx2_init_5706s_phy(bp);
1946 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1947 rc = bnx2_init_5708s_phy(bp);
1948 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1949 rc = bnx2_init_5709s_phy(bp);
1952 rc = bnx2_init_copper_phy(bp);
1957 rc = bnx2_setup_phy(bp, bp->phy_port);
1963 bnx2_set_mac_loopback(struct bnx2 *bp)
1967 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1968 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1969 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1970 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1975 static int bnx2_test_link(struct bnx2 *);
1978 bnx2_set_phy_loopback(struct bnx2 *bp)
1983 spin_lock_bh(&bp->phy_lock);
1984 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1986 spin_unlock_bh(&bp->phy_lock);
1990 for (i = 0; i < 10; i++) {
1991 if (bnx2_test_link(bp) == 0)
1996 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1997 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1998 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1999 BNX2_EMAC_MODE_25G_MODE);
2001 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2002 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2008 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2014 msg_data |= bp->fw_wr_seq;
2016 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2018 /* wait for an acknowledgement. */
2019 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2020 msleep(10);
2022 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2024 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2025 break;
2027 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2030 /* If we timed out, inform the firmware that this is the case. */
2031 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2033 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2036 msg_data &= ~BNX2_DRV_MSG_CODE;
2037 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2039 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2044 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2051 bnx2_init_5709_context(struct bnx2 *bp)
2056 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2057 val |= (BCM_PAGE_BITS - 8) << 16;
2058 REG_WR(bp, BNX2_CTX_COMMAND, val);
2059 for (i = 0; i < 10; i++) {
2060 val = REG_RD(bp, BNX2_CTX_COMMAND);
2061 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2062 break;
2063 udelay(2);
2064 }
2065 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2066 return -EBUSY;
2068 for (i = 0; i < bp->ctx_pages; i++) {
2071 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2072 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2073 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2074 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2075 (u64) bp->ctx_blk_mapping[i] >> 32);
2076 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2077 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2078 for (j = 0; j < 10; j++) {
2080 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2081 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2082 break;
2083 udelay(5);
2084 }
2085 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2086 ret = -EBUSY;
2087 break;
2088 }
2089 }
2091 return ret;
2092 }
2094 bnx2_init_context(struct bnx2 *bp)
2100 u32 vcid_addr, pcid_addr, offset;
2105 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2108 vcid_addr = GET_PCID_ADDR(vcid);
2110 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2115 pcid_addr = GET_PCID_ADDR(new_vcid);
2118 vcid_addr = GET_CID_ADDR(vcid);
2119 pcid_addr = vcid_addr;
2122 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2123 vcid_addr += (i << PHY_CTX_SHIFT);
2124 pcid_addr += (i << PHY_CTX_SHIFT);
2126 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2127 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2129 /* Zero out the context. */
2130 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2131 CTX_WR(bp, 0x00, offset, 0);
2133 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2134 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2140 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2146 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2147 if (good_mbuf == NULL) {
2148 printk(KERN_ERR PFX "Failed to allocate memory in "
2149 "bnx2_alloc_bad_rbuf\n");
2153 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2154 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2158 /* Allocate a bunch of mbufs and save the good ones in an array. */
2159 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2160 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2161 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2163 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2165 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2167 /* The addresses with Bit 9 set are bad memory blocks. */
2168 if (!(val & (1 << 9))) {
2169 good_mbuf[good_mbuf_cnt] = (u16) val;
2173 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2176 /* Free the good ones back to the mbuf pool thus discarding
2177 * all the bad ones. */
2178 while (good_mbuf_cnt) {
2179 good_mbuf_cnt--;
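/* Sketch of the free-command encoding used below (assumption based on the
 * shifts): the mbuf cluster number is replicated in both halves of the
 * word, with bit 0 set, which is the format BNX2_RBUF_FW_BUF_FREE expects.
 */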
2181 val = good_mbuf[good_mbuf_cnt];
2182 val = (val << 9) | val | 1;
2184 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2191 bnx2_set_mac_addr(struct bnx2 *bp)
2194 u8 *mac_addr = bp->dev->dev_addr;
2196 val = (mac_addr[0] << 8) | mac_addr[1];
2198 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2200 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2201 (mac_addr[4] << 8) | mac_addr[5];
2203 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
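/* Example (hypothetical address 00:10:18:00:00:01): MATCH0 receives
 * 0x0010 (bytes 0-1) and MATCH1 receives 0x18000001 (bytes 2-5), i.e. the
 * address is split big-endian across the two match registers.
 */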
2207 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2209 struct sk_buff *skb;
2210 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2212 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2213 unsigned long align;
2215 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2216 if (skb == NULL) {
2217 return -ENOMEM;
2218 }
2220 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2221 skb_reserve(skb, BNX2_RX_ALIGN - align);
2223 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2224 PCI_DMA_FROMDEVICE);
2227 pci_unmap_addr_set(rx_buf, mapping, mapping);
2229 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2230 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2232 bp->rx_prod_bseq += bp->rx_buf_use_size;
2238 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2240 struct status_block *sblk = bp->status_blk;
2241 u32 new_link_state, old_link_state;
2244 new_link_state = sblk->status_attn_bits & event;
2245 old_link_state = sblk->status_attn_bits_ack & event;
2246 if (new_link_state != old_link_state) {
2248 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2250 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
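/* Attention handshake: an event is "new" when its bit differs between
 * status_attn_bits and status_attn_bits_ack. Writing the bit to the SET
 * or CLEAR command register copies the current state into the ack bits,
 * re-arming detection of the next change.
 */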
2258 bnx2_phy_int(struct bnx2 *bp)
2260 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2261 spin_lock(&bp->phy_lock);
2262 bnx2_set_link(bp);
2263 spin_unlock(&bp->phy_lock);
2265 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2266 bnx2_set_remote_link(bp);
2271 bnx2_tx_int(struct bnx2 *bp)
2273 struct status_block *sblk = bp->status_blk;
2274 u16 hw_cons, sw_cons, sw_ring_cons;
2277 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2278 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2279 hw_cons++;
2280 }
2281 sw_cons = bp->tx_cons;
2283 while (sw_cons != hw_cons) {
2284 struct sw_bd *tx_buf;
2285 struct sk_buff *skb;
2288 sw_ring_cons = TX_RING_IDX(sw_cons);
2290 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2293 /* partial BD completions possible with TSO packets */
2294 if (skb_is_gso(skb)) {
2295 u16 last_idx, last_ring_idx;
2297 last_idx = sw_cons +
2298 skb_shinfo(skb)->nr_frags + 1;
2299 last_ring_idx = sw_ring_cons +
2300 skb_shinfo(skb)->nr_frags + 1;
2301 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2304 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2309 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2310 skb_headlen(skb), PCI_DMA_TODEVICE);
2313 last = skb_shinfo(skb)->nr_frags;
2315 for (i = 0; i < last; i++) {
2316 sw_cons = NEXT_TX_BD(sw_cons);
2318 pci_unmap_page(bp->pdev,
2319 pci_unmap_addr(
2320 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2321 mapping),
2322 skb_shinfo(skb)->frags[i].size,
2323 PCI_DMA_TODEVICE);
2324 }
2326 sw_cons = NEXT_TX_BD(sw_cons);
2328 tx_free_bd += last + 1;
2332 hw_cons = bp->hw_tx_cons =
2333 sblk->status_tx_quick_consumer_index0;
2335 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2336 hw_cons++;
2337 }
2338 }
2340 bp->tx_cons = sw_cons;
2341 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2342 * before checking for netif_queue_stopped(). Without the
2343 * memory barrier, there is a small possibility that bnx2_start_xmit()
2344 * will miss it and cause the queue to be stopped forever.
2345 */
2346 smp_mb();
2348 if (unlikely(netif_queue_stopped(bp->dev)) &&
2349 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2350 netif_tx_lock(bp->dev);
2351 if ((netif_queue_stopped(bp->dev)) &&
2352 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2353 netif_wake_queue(bp->dev);
2354 netif_tx_unlock(bp->dev);
2359 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2362 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2363 struct rx_bd *cons_bd, *prod_bd;
2365 cons_rx_buf = &bp->rx_buf_ring[cons];
2366 prod_rx_buf = &bp->rx_buf_ring[prod];
2368 pci_dma_sync_single_for_device(bp->pdev,
2369 pci_unmap_addr(cons_rx_buf, mapping),
2370 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2372 bp->rx_prod_bseq += bp->rx_buf_use_size;
2374 prod_rx_buf->skb = skb;
2379 pci_unmap_addr_set(prod_rx_buf, mapping,
2380 pci_unmap_addr(cons_rx_buf, mapping));
2382 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2383 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2384 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2385 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2389 bnx2_rx_int(struct bnx2 *bp, int budget)
2391 struct status_block *sblk = bp->status_blk;
2392 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2393 struct l2_fhdr *rx_hdr;
2396 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2397 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2398 hw_cons++;
2399 }
2400 sw_cons = bp->rx_cons;
2401 sw_prod = bp->rx_prod;
2403 /* Memory barrier necessary as speculative reads of the rx
2404 * buffer can be ahead of the index in the status block
2405 */
2406 rmb();
2407 while (sw_cons != hw_cons) {
2410 struct sw_bd *rx_buf;
2411 struct sk_buff *skb;
2412 dma_addr_t dma_addr;
2414 sw_ring_cons = RX_RING_IDX(sw_cons);
2415 sw_ring_prod = RX_RING_IDX(sw_prod);
2417 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2422 dma_addr = pci_unmap_addr(rx_buf, mapping);
2424 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2425 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2427 rx_hdr = (struct l2_fhdr *) skb->data;
2428 len = rx_hdr->l2_fhdr_pkt_len - 4;
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}
		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}
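		/* The copy-break above only applies when jumbo-sized
		 * buffers are posted (mtu > 1500): small frames are copied
		 * into a fresh skb so the large buffer can be recycled, and
		 * the 2-byte reserve keeps the IP header 4-byte aligned
		 * behind the 14-byte Ethernet header.
		 */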
		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
				   L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;
		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;
}
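/* The two mailbox writes above are the rx doorbell: HOST_BDIDX
 * publishes the new producer index and HOST_BSEQ the cumulative byte
 * count of posted buffers, which the chip compares against its own
 * counters to see how many new buffers are available.
 */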
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
2545 prefetch(bp->status_blk);
2546 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2547 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2548 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
2565 prefetch(bp->status_blk);
	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
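/* No BNX2_PCICFG_INT_ACK_CMD write is needed in the one-shot handler:
 * on the 5709 the hardware masks the MSI by itself after firing, and
 * bnx2_poll() re-arms it when the NAPI poll completes.
 */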
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt.  Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;
2594 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2595 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2596 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bp->napi)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bp->napi);
	}

	return IRQ_HANDLED;
}
2615 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2616 STATUS_ATTN_BITS_TIMER_ABORT)
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
static int
bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
	int work_done = 0;
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons)
		work_done = bnx2_rx_int(bp, budget);

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();
	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev, napi);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return work_done;
}
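/* A sketch of the ack protocol above: each INT_ACK_CMD write latches
 * the last status index the host has consumed.  With MSI a single
 * unmasking write suffices; with INTx the first write keeps MASK_INT
 * set while the index is updated and the second unmasks, so an event
 * arriving between the two re-asserts the line instead of being lost.
 */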
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);
2699 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2700 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2701 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
2709 if (dev->flags & IFF_PROMISC) {
2710 /* Promiscuous mode. */
2711 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2712 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2713 BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}
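	/* Hash example: for a multicast address whose little-endian CRC32
	 * has low byte 0x4a, bit = 0x4a, regidx = (0x4a & 0xe0) >> 5 = 2
	 * and bit & 0x1f = 10, so bit 10 of BNX2_EMAC_MULTICAST_HASH2 is
	 * set; the low CRC byte selects one of 256 filter bits spread
	 * across the 8 hash registers.
	 */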
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}
2755 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2756 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2757 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
	spin_unlock_bh(&bp->phy_lock);
}
#define FW_BUF_SIZE	0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "uncompression.\n", bp->dev->name);
	return -ENOMEM;
}
static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc == Z_OK)
		rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
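/* The hand-rolled gzip header parsing above exists because
 * zlib_inflateInit2() is called with a negative window size
 * (-MAX_WBITS), i.e. raw-deflate mode: zlib then expects a bare
 * deflate stream, so the 10-byte gzip header and the optional
 * NUL-terminated FNAME field must be skipped first.
 */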
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
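/* Each RV2P instruction is 64 bits, loaded as an INSTR_HIGH/INSTR_LOW
 * register pair; since i advances 8 bytes per instruction, (i / 8) in
 * the ADDR_CMD write is the instruction index being committed to the
 * processor's instruction memory.
 */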
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset, val, text_len;
	void *text;
	int rc, j;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2894 /* Load the Text area. */
2895 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text, &text_len);
	if (rc)
		return rc;

	fw->text = text;
	for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
	}
	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, fw->data[j]);
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, fw->sbss[j]);
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	for (j = 0; j < (fw->bss_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, fw->bss[j]);
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, fw->rodata[j]);
	}
	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
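/* The same halt/load/start pattern is used for every on-chip RISC
 * engine: halt it through the mode register, clear its event state,
 * copy the firmware sections (text/data/sbss/bss/rodata) into its
 * scratchpad through the MIPS view window, point the PC at the entry
 * address, then clear the halt bit to let it run.
 */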
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2996 /* Initialize the RX Processor. */
2997 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2998 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2999 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3000 cpu_reg.state = BNX2_RXP_CPU_STATE;
3001 cpu_reg.state_value_clear = 0xffffff;
3002 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3003 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3004 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3005 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3006 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3007 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3008 cpu_reg.mips_view_base = 0x8000000;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;
3019 /* Initialize the TX Processor. */
3020 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3021 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3022 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3023 cpu_reg.state = BNX2_TXP_CPU_STATE;
3024 cpu_reg.state_value_clear = 0xffffff;
3025 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3026 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3027 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3028 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3029 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3030 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3031 cpu_reg.mips_view_base = 0x8000000;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;
3042 /* Initialize the TX Patch-up Processor. */
3043 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3044 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3045 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3046 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3047 cpu_reg.state_value_clear = 0xffffff;
3048 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3049 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3050 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3051 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3052 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3053 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3054 cpu_reg.mips_view_base = 0x8000000;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;
3065 /* Initialize the Completion Processor. */
3066 cpu_reg.mode = BNX2_COM_CPU_MODE;
3067 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3068 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3069 cpu_reg.state = BNX2_COM_CPU_STATE;
3070 cpu_reg.state_value_clear = 0xffffff;
3071 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3072 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3073 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3074 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3075 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3076 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3077 cpu_reg.mips_view_base = 0x8000000;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;
3088 /* Initialize the Command Processor. */
3089 cpu_reg.mode = BNX2_CP_CPU_MODE;
3090 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3091 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3092 cpu_reg.state = BNX2_CP_CPU_STATE;
3093 cpu_reg.state_value_clear = 0xffffff;
3094 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3095 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3096 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3097 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3098 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3099 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3100 cpu_reg.mips_view_base = 0x8000000;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}

init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
3115 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3119 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3125 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3126 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3127 PCI_PM_CTRL_PME_STATUS);
	if (pmcsr & PCI_PM_CTRL_STATE_MASK)
		/* delay required during transition out of D3hot */
		msleep(20);
3133 val = REG_RD(bp, BNX2_EMAC_MODE);
3134 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3135 val &= ~BNX2_EMAC_MODE_MPKT;
3136 REG_WR(bp, BNX2_EMAC_MODE, val);
3138 val = REG_RD(bp, BNX2_RPM_CONFIG);
3139 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3140 REG_WR(bp, BNX2_RPM_CONFIG, val);
3151 autoneg = bp->autoneg;
3152 advertising = bp->advertising;
3154 bp->autoneg = AUTONEG_SPEED;
3155 bp->advertising = ADVERTISED_10baseT_Half |
3156 ADVERTISED_10baseT_Full |
3157 ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);
3163 bp->autoneg = autoneg;
3164 bp->advertising = advertising;
3166 bnx2_set_mac_addr(bp);
3168 val = REG_RD(bp, BNX2_EMAC_MODE);
3170 /* Enable port mode. */
3171 val &= ~BNX2_EMAC_MODE_PORT;
3172 val |= BNX2_EMAC_MODE_PORT_MII |
3173 BNX2_EMAC_MODE_MPKT_RCVD |
3174 BNX2_EMAC_MODE_ACPI_RCVD |
3175 BNX2_EMAC_MODE_MPKT;
3177 REG_WR(bp, BNX2_EMAC_MODE, val);
3179 /* receive all multicast */
3180 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3181 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3184 REG_WR(bp, BNX2_EMAC_RX_MODE,
3185 BNX2_EMAC_RX_MODE_SORT_MODE);
3187 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3188 BNX2_RPM_SORT_USER0_MC_EN;
3189 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3190 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3191 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3192 BNX2_RPM_SORT_USER0_ENA);
3194 /* Need to enable EMAC and RPM for WOL. */
3195 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3196 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3197 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3198 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3200 val = REG_RD(bp, BNX2_RPM_CONFIG);
3201 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3202 REG_WR(bp, BNX2_RPM_CONFIG, val);
3204 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3207 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3210 if (!(bp->flags & NO_WOL_FLAG))
3211 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3213 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3214 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3215 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3224 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3226 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3229 /* No more memory access after this point until
3230 * device is brought back to D0.
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
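/* BNX2_NVM_SW_ARB implements a small hardware semaphore shared with
 * the management firmware: the host sets ARB_REQ_SET2 and must see
 * ARB_ARB2 granted before touching the flash interface, and clears it
 * again with ARB_REQ_CLR2 when done.
 */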
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;
3298 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3299 REG_WR(bp, BNX2_NVM_COMMAND,
3300 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}
static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}
static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;
3359 /* Build an erase command */
3360 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3361 BNX2_NVM_COMMAND_DOIT;
3363 /* Need to clear DONE bit separately. */
3364 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
	/* Address of the NVRAM page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3369 /* Issue an erase command. */
3370 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3398 /* Calculate an offset of a buffered flash, not needed for 5709. */
3399 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3400 offset = ((offset / bp->flash_info->page_size) <<
3401 bp->flash_info->page_bits) +
3402 (offset % bp->flash_info->page_size);
3405 /* Need to clear DONE bit separately. */
3406 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3408 /* Address of the NVRAM to read from. */
3409 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3411 /* Issue a read command. */
3412 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
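/* Translation example for the BNX2_NV_TRANSLATE case above, assuming a
 * buffered part with 264-byte pages and page_bits = 9: linear offset
 * 1000 becomes ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 =
 * 1744, i.e. the page number moves into the high bits the controller
 * expects while the byte offset within the page stays in the low bits.
 */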
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	u32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3445 /* Calculate an offset of a buffered flash, not needed for 5709. */
3446 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3447 offset = ((offset / bp->flash_info->page_size) <<
3448 bp->flash_info->page_bits) +
3449 (offset % bp->flash_info->page_size);
3452 /* Need to clear DONE bit separately. */
3453 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3455 memcpy(&val32, val, 4);
3456 val32 = cpu_to_be32(val32);
3458 /* Write the data. */
3459 REG_WR(bp, BNX2_NVM_WRITE, val32);
3461 /* Address of the NVRAM to write to. */
3462 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3464 /* Issue the write command. */
3465 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;
3487 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3488 bp->flash_info = &flash_5709;
		goto get_flash_size;
	}
3492 /* Determine the selected interface. */
3493 val = REG_RD(bp, BNX2_NVM_CFG1);
3495 entry_count = ARRAY_SIZE(flash_table);
3497 if (val & 0x40000000) {
		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;

		/* Not yet been reconfigured */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;
3518 for (j = 0, flash = &flash_table[0]; j < entry_count;
3521 if ((val & mask) == (flash->strapping & mask)) {
3522 bp->flash_info = flash;
3524 /* Request access to the flash interface. */
3525 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3528 /* Enable access to flash interface */
3529 bnx2_enable_nvram_access(bp);
3531 /* Reconfigure the flash interface */
3532 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3533 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3534 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3535 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3537 /* Disable access to flash interface */
3538 bnx2_disable_nvram_access(bp);
3539 bnx2_release_nvram_lock(bp);
3544 } /* if (val & 0x40000000) */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;
3573 /* Request access to the flash interface. */
3574 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3577 /* Enable access to flash interface */
3578 bnx2_enable_nvram_access(bp);
3591 pre_len = 4 - (offset & 3);
3593 if (pre_len >= len32) {
3595 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3596 BNX2_NVM_COMMAND_LAST;
3599 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3602 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3607 memcpy(ret_buf, buf + (offset & 3), pre_len);
3614 extra = 4 - (len32 & 3);
3615 len32 = (len32 + 4) & ~3;
3622 cmd_flags = BNX2_NVM_COMMAND_LAST;
3624 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3625 BNX2_NVM_COMMAND_LAST;
3627 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3629 memcpy(ret_buf, buf, 4 - extra);
3631 else if (len32 > 0) {
3634 /* Read the first word. */
3638 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3640 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3642 /* Advance to the next dword. */
3647 while (len32 > 4 && rc == 0) {
3648 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3650 /* Advance to the next dword. */
3659 cmd_flags = BNX2_NVM_COMMAND_LAST;
3660 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3662 memcpy(ret_buf, buf, 4 - extra);
3665 /* Disable access to flash interface */
3666 bnx2_disable_nvram_access(bp);
	bnx2_release_nvram_lock(bp);

	return rc;
}
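/* bnx2_nvram_read() reduces arbitrary offset/length requests to
 * dword-aligned flash accesses: an optional leading partial word, a
 * run of whole dwords bracketed by the FIRST/LAST command flags, and
 * an optional trailing partial word, with only the requested bytes
 * copied into ret_buf.
 */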
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
3726 while ((written < len32) && (rc == 0)) {
3727 u32 page_start, page_end, data_start, data_end;
3728 u32 addr, cmd_flags;
3731 /* Find the page_start addr */
3732 page_start = offset32 + written;
3733 page_start -= (page_start % bp->flash_info->page_size);
3734 /* Find the page_end addr */
3735 page_end = page_start + bp->flash_info->page_size;
3736 /* Find the data_start addr */
3737 data_start = (written == 0) ? offset32 : page_start;
3738 /* Find the data_end addr */
3739 data_end = (page_end > offset32 + len32) ?
3740 (offset32 + len32) : page_end;
3742 /* Request access to the flash interface. */
3743 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3744 goto nvram_write_end;
3746 /* Enable access to flash interface */
3747 bnx2_enable_nvram_access(bp);
3749 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3750 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3753 /* Read the whole page into the buffer
3754 * (non-buffer flash only) */
3755 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3756 if (j == (bp->flash_info->page_size - 4)) {
3757 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3759 rc = bnx2_nvram_read_dword(bp,
3765 goto nvram_write_end;
3771 /* Enable writes to flash interface (unlock write-protect) */
3772 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3773 goto nvram_write_end;
3775 /* Loop to write back the buffer data from page_start to
3778 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3779 /* Erase the page */
3780 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3781 goto nvram_write_end;
3783 /* Re-enable the write again for the actual write */
3784 bnx2_enable_nvram_write(bp);
3786 for (addr = page_start; addr < data_start;
3787 addr += 4, i += 4) {
3789 rc = bnx2_nvram_write_dword(bp, addr,
3790 &flash_buffer[i], cmd_flags);
3793 goto nvram_write_end;
3799 /* Loop to write the new data from data_start to data_end */
3800 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3801 if ((addr == page_end - 4) ||
3802 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3803 (addr == data_end - 4))) {
3805 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3807 rc = bnx2_nvram_write_dword(bp, addr, buf,
3811 goto nvram_write_end;
3817 /* Loop to write back the buffer data from data_end
3819 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3820 for (addr = data_end; addr < page_end;
3821 addr += 4, i += 4) {
3823 if (addr == page_end-4) {
3824 cmd_flags = BNX2_NVM_COMMAND_LAST;
3826 rc = bnx2_nvram_write_dword(bp, addr,
3827 &flash_buffer[i], cmd_flags);
3830 goto nvram_write_end;
3836 /* Disable writes to flash interface (lock write-protect) */
3837 bnx2_disable_nvram_write(bp);
3839 /* Disable access to flash interface */
3840 bnx2_disable_nvram_access(bp);
3841 bnx2_release_nvram_lock(bp);
3843 /* Increment written */
3844 written += data_end - data_start;
nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;
3866 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3867 if (netif_running(bp->dev)) {
3868 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3869 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   val);
		}
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;
	}
}
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);
3899 /* Wait for the firmware to tell us it is ok to issue a reset. */
3900 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3902 /* Deposit a driver reset signature so the firmware knows that
3903 * this is a soft reset. */
3904 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3905 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3907 /* Do a dummy read to force the chip to complete all current transaction
3908 * before we issue a reset. */
3909 val = REG_RD(bp, BNX2_MISC_ID);
3911 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3912 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3913 REG_RD(bp, BNX2_MISC_COMMAND);
3916 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3917 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3919 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3922 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3923 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3924 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3927 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
	/* Reading back any register after chip reset will hang the
	 * bus on 5706 A0 and A1.  The msleep below provides plenty
	 * of margin for write posting.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(20);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
			break;

		udelay(10);
	}

	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}
	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}
	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;
3965 spin_lock_bh(&bp->phy_lock);
3966 bnx2_init_remote_phy(bp);
3967 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3968 bnx2_set_default_remote_link(bp);
3969 spin_unlock_bh(&bp->phy_lock);
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);
4006 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4007 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4008 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4010 REG_WR(bp, BNX2_DMA_CONFIG, val);
4012 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4013 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4014 val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}
4027 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4028 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4029 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4030 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4032 /* Initialize context mapping and zero out the quick contexts. The
4033 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;
4044 bnx2_init_nvram(bp);
4046 bnx2_set_mac_addr(bp);
4048 val = REG_RD(bp, BNX2_MQ_CONFIG);
4049 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4050 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4051 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4052 val |= BNX2_MQ_CONFIG_HALT_DIS;
4054 REG_WR(bp, BNX2_MQ_CONFIG, val);
4056 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4057 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4058 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4060 val = (BCM_PAGE_BITS - 8) << 24;
4061 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4063 /* Configure page size. */
4064 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4065 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4066 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4067 REG_WR(bp, BNX2_TBDR_CONFIG, val);
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4077 /* Program the MTU. Also include 4 bytes for CRC32. */
4078 val = bp->dev->mtu + ETH_HLEN + 4;
4079 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4080 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4081 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4083 bp->last_status_idx = 0;
4084 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4086 /* Set up how to generate a link change interrupt. */
4087 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4089 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4090 (u64) bp->status_blk_mapping & 0xffffffff);
4091 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4093 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4094 (u64) bp->stats_blk_mapping & 0xffffffff);
4095 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4096 (u64) bp->stats_blk_mapping >> 32);
4098 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4099 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4101 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4102 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4104 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4105 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4107 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4109 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4111 REG_WR(bp, BNX2_HC_COM_TICKS,
4112 (bp->com_ticks_int << 16) | bp->com_ticks);
4114 REG_WR(bp, BNX2_HC_CMD_TICKS,
4115 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}
4130 if (bp->flags & ONE_SHOT_MSI_FLAG)
4131 val |= BNX2_HC_CONFIG_ONE_SHOT;
4133 REG_WR(bp, BNX2_HC_CONFIG, val);
4135 /* Clear internal stats counters. */
4136 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4138 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4140 /* Initialize the receive filter. */
4141 bnx2_set_rx_mode(bp->dev);
4143 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4144 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4145 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;
4166 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4167 offset0 = BNX2_L2CTX_TYPE_XI;
4168 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4169 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
4177 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4178 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4180 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4181 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4183 val = (u64) bp->tx_desc_mapping >> 32;
4184 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4186 val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
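/* The descriptor at index MAX_TX_DESC_CNT is not a data BD: it carries
 * the bus address of the ring itself, so the hardware follows it back
 * to entry 0 and the array behaves as a circular chain.
 */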
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}
4250 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4251 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4253 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4255 val = (u64) bp->rx_desc_mapping[0] >> 32;
4256 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4258 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4259 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 num_rings, max;

	bp->rx_ring_size = size;
	num_rings = 1;
	while (size > MAX_RX_DESC_CNT) {
		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = MAX_RX_RINGS;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	bp->rx_max_ring = max;
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
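/* Rounding example, assuming MAX_RX_DESC_CNT = 255 and MAX_RX_RINGS = 4:
 * a request for 600 buffers needs num_rings = 3 pages; max shifts right
 * from 4 until it shares a set bit with 3 (max = 2), and since 3 is not
 * itself a power of two, max doubles back to 4 rings.
 */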
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		i += j + 1;
	}
}
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
4412 { 0x0090, 0, 0xffffffff, 0x00000000 },
4413 { 0x0094, 0, 0x00000000, 0x00000000 },
4415 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4416 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4417 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4418 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4419 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4420 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4421 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4422 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4423 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4425 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4426 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4427 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4428 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4429 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4430 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4432 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4433 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4434 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4436 { 0x1000, 0, 0x00000000, 0x00000001 },
4437 { 0x1004, 0, 0x00000000, 0x000f0001 },
4439 { 0x1408, 0, 0x01c00800, 0x00000000 },
4440 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4441 { 0x14a8, 0, 0x00000000, 0x000001ff },
4442 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4443 { 0x14b0, 0, 0x00000002, 0x00000001 },
4444 { 0x14b8, 0, 0x00000000, 0x00000000 },
4445 { 0x14c0, 0, 0x00000000, 0x00000009 },
4446 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4447 { 0x14cc, 0, 0x00000000, 0x00000001 },
4448 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4450 { 0x1800, 0, 0x00000000, 0x00000001 },
4451 { 0x1804, 0, 0x00000000, 0x00000003 },
4453 { 0x2800, 0, 0x00000000, 0x00000001 },
4454 { 0x2804, 0, 0x00000000, 0x00003f01 },
4455 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4456 { 0x2810, 0, 0xffff0000, 0x00000000 },
4457 { 0x2814, 0, 0xffff0000, 0x00000000 },
4458 { 0x2818, 0, 0xffff0000, 0x00000000 },
4459 { 0x281c, 0, 0xffff0000, 0x00000000 },
4460 { 0x2834, 0, 0xffffffff, 0x00000000 },
4461 { 0x2840, 0, 0x00000000, 0xffffffff },
4462 { 0x2844, 0, 0x00000000, 0xffffffff },
4463 { 0x2848, 0, 0xffffffff, 0x00000000 },
4464 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4466 { 0x2c00, 0, 0x00000000, 0x00000011 },
4467 { 0x2c04, 0, 0x00000000, 0x00030007 },
4469 { 0x3c00, 0, 0x00000000, 0x00000001 },
4470 { 0x3c04, 0, 0x00000000, 0x00070000 },
4471 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4472 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4473 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4474 { 0x3c14, 0, 0x00000000, 0xffffffff },
4475 { 0x3c18, 0, 0x00000000, 0xffffffff },
4476 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4477 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4479 { 0x5004, 0, 0x00000000, 0x0000007f },
4480 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4482 { 0x5c00, 0, 0x00000000, 0x00000001 },
4483 { 0x5c04, 0, 0x00000000, 0x0003000f },
4484 { 0x5c08, 0, 0x00000003, 0x00000000 },
4485 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4486 { 0x5c10, 0, 0x00000000, 0xffffffff },
4487 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4488 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4489 { 0x5c88, 0, 0x00000000, 0x00077373 },
4490 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4492 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4493 { 0x680c, 0, 0xffffffff, 0x00000000 },
4494 { 0x6810, 0, 0xffffffff, 0x00000000 },
4495 { 0x6814, 0, 0xffffffff, 0x00000000 },
4496 { 0x6818, 0, 0xffffffff, 0x00000000 },
4497 { 0x681c, 0, 0xffffffff, 0x00000000 },
4498 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4499 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4500 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4501 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4502 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4503 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4504 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4505 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4506 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4507 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4508 { 0x684c, 0, 0xffffffff, 0x00000000 },
4509 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4510 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4511 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4512 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4513 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4514 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4516 { 0xffff, 0, 0x00000000, 0x00000000 },
	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;
4524 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4525 u32 offset, rw_mask, ro_mask, save_val, val;
4526 u16 flags = reg_tbl[i].flags;
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;
4531 offset = (u32) reg_tbl[i].offset;
4532 rw_mask = reg_tbl[i].rw_mask;
4533 ro_mask = reg_tbl[i].ro_mask;
		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}

	return ret;
}
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			REG_WR_IND(bp, start + offset, test_pattern[i]);

			if (REG_RD_IND(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}
static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32   offset;
		u32   len;
	} mem_tbl_5706[] = {
4602 { 0x60000, 0x4000 },
4603 { 0xa0000, 0x3000 },
4604 { 0xe0000, 0x4000 },
4605 { 0x120000, 0x4000 },
4606 { 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	},
	mem_tbl_5709[] = {
		{ 0x60000, 0x4000 },
		{ 0xa0000, 0x3000 },
		{ 0xe0000, 0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
	};
	struct mem_entry *mem_tbl;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
4635 #define BNX2_MAC_LOOPBACK 0
4636 #define BNX2_PHY_LOOPBACK 1
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
4666 packet = skb_put(skb, pkt_size);
4667 memcpy(packet, bp->dev->dev_addr, 6);
4668 memset(packet + 6, 0x0, 8);
4669 for (i = 14; i < pkt_size; i++)
4670 packet[i] = (unsigned char) (i & 0xff);
4672 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4675 REG_WR(bp, BNX2_HC_COMMAND,
4676 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4678 REG_RD(bp, BNX2_HC_COMMAND);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;
4685 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4687 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4688 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4689 txbd->tx_bd_mss_nbytes = pkt_size;
4690 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4694 bp->tx_prod_bseq += pkt_size;
4696 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4697 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4701 REG_WR(bp, BNX2_HC_COMMAND,
4702 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4704 REG_RD(bp, BNX2_HC_COMMAND);
	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}
4720 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4721 rx_skb = rx_buf->skb;
4723 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4724 skb_reserve(rx_skb, bp->rx_offset);
4726 pci_dma_sync_single_for_cpu(bp->pdev,
4727 pci_unmap_addr(rx_buf, mapping),
4728 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4730 if (rx_hdr->l2_fhdr_status &
4731 (L2_FHDR_ERRORS_BAD_CRC |
4732 L2_FHDR_ERRORS_PHY_DECODE |
4733 L2_FHDR_ERRORS_ALIGNMENT |
4734 L2_FHDR_ERRORS_TOO_SHORT |
4735 L2_FHDR_ERRORS_GIANT_FRAME)) {
		goto loopback_test_done;
	}
4740 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4757 #define BNX2_MAC_LOOPBACK_FAILED 1
4758 #define BNX2_PHY_LOOPBACK_FAILED 2
4759 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4760 BNX2_PHY_LOOPBACK_FAILED)
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
4781 #define NVRAM_SIZE 0x200
4782 #define CRC32_RESIDUAL 0xdebb20e3
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}
4801 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4802 goto test_nvram_done;
	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}
	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL)
		rc = -ENODEV;

test_nvram_done:
	return rc;
}
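/* CRC32_RESIDUAL (0xdebb20e3) is the constant remainder produced when
 * a CRC32 is run over a block that ends with its own little-endian
 * CRC, so each 0x100-byte region validates without knowing where its
 * stored checksum field sits.
 */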
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
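/* The BMSR is read twice above because link-down events are latched:
 * the first read clears any stale latched status and the second
 * reflects the current link state.
 */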
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
			    !(phy2 & 0x20)) {	/* no CONFIG */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
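/* Parallel detection, roughly: if the SerDes sees a signal (phy1 bit 4)
 * but no autoneg CONFIG exchange (phy2 bit 5), the link partner is
 * assumed to be a non-negotiating device and the driver forces
 * 1000 Mb/s full duplex; autoneg is restored once CONFIG ordered sets
 * appear again.
 */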
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;
4963 if (atomic_read(&bp->intr_sem) != 0)
4964 goto bnx2_restart_timer;
4966 bnx2_send_heart_beat(bp);
4968 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4970 /* workaround occasional corrupted counters */
4971 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4972 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4973 BNX2_HC_COMMAND_STATS_NOW);
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
static int
bnx2_request_irq(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	int rc = 0;

	if (bp->flags & USING_MSI_FLAG) {
		irq_handler_t	fn = bnx2_msi;

		if (bp->flags & ONE_SHOT_MSI_FLAG)
			fn = bnx2_msi_1shot;

		rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
	} else
		rc = request_irq(bp->pdev->irq, bnx2_interrupt,
				 IRQF_SHARED, dev->name, dev);
	return rc;
}
static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;

	if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
	} else
		free_irq(bp->pdev->irq, dev);
}
5018 /* Called with rtnl_lock */
5020 bnx2_open(struct net_device *dev)
5022 struct bnx2 *bp = netdev_priv(dev);
5025 netif_carrier_off(dev);
5027 bnx2_set_power_state(bp, PCI_D0);
5028 bnx2_disable_int(bp);
5030 rc = bnx2_alloc_mem(bp);
5034 napi_enable(&bp->napi);
5036 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5037 if (pci_enable_msi(bp->pdev) == 0) {
5038 bp->flags |= USING_MSI_FLAG;
5039 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5040 bp->flags |= ONE_SHOT_MSI_FLAG;
5043 rc = bnx2_request_irq(bp);
5046 napi_disable(&bp->napi);
5051 rc = bnx2_init_nic(bp);
5054 napi_disable(&bp->napi);
5061 mod_timer(&bp->timer, jiffies + bp->current_interval);
5063 atomic_set(&bp->intr_sem, 0);
5065 bnx2_enable_int(bp);
5067 if (bp->flags & USING_MSI_FLAG) {
5068 /* Test MSI to make sure it is working.
5069 * If the MSI test fails, go back to INTx mode.
5070 */
5071 if (bnx2_test_intr(bp) != 0) {
5072 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5073 " using MSI, switching to INTx mode. Please"
5074 " report this failure to the PCI maintainer"
5075 " and include system chipset information.\n",
5078 bnx2_disable_int(bp);
5081 rc = bnx2_init_nic(bp);
5084 rc = bnx2_request_irq(bp);
5087 napi_disable(&bp->napi);
5090 del_timer_sync(&bp->timer);
5093 bnx2_enable_int(bp);
5096 if (bp->flags & USING_MSI_FLAG) {
5097 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5100 netif_start_queue(dev);
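/* Deferred reset worker, scheduled from bnx2_tx_timeout(): stop the
 * interface, reinitialize the NIC, and restart it. in_reset_task lets
 * bnx2_close() wait out a reset in flight instead of calling
 * flush_scheduled_work() and risking an rtnl deadlock.
 */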
5106 bnx2_reset_task(struct work_struct *work)
5108 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5110 if (!netif_running(bp->dev))
5113 bp->in_reset_task = 1;
5114 bnx2_netif_stop(bp);
5118 atomic_set(&bp->intr_sem, 1);
5119 bnx2_netif_start(bp);
5120 bp->in_reset_task = 0;
5124 bnx2_tx_timeout(struct net_device *dev)
5126 struct bnx2 *bp = netdev_priv(dev);
5128 /* This allows the netif to be shut down gracefully before resetting */
5129 schedule_work(&bp->reset_task);
5133 /* Called with rtnl_lock */
5135 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5137 struct bnx2 *bp = netdev_priv(dev);
5139 bnx2_netif_stop(bp);
5142 bnx2_set_rx_mode(dev);
5144 bnx2_netif_start(bp);
5148 /* Called with netif_tx_lock.
5149 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5150 * netif_wake_queue().
5151 */
5153 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5155 struct bnx2 *bp = netdev_priv(dev);
5158 struct sw_bd *tx_buf;
5159 u32 len, vlan_tag_flags, last_frag, mss;
5160 u16 prod, ring_prod;
5163 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5164 netif_stop_queue(dev);
5165 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5168 return NETDEV_TX_BUSY;
5170 len = skb_headlen(skb);
5172 ring_prod = TX_RING_IDX(prod);
5175 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5176 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5179 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5180 vlan_tag_flags |=
5181 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5183 if ((mss = skb_shinfo(skb)->gso_size)) {
5184 u32 tcp_opt_len, ip_tcp_len;
5187 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5189 tcp_opt_len = tcp_optlen(skb);
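/* The IPv6 TSO descriptor splits the TCP header offset (relative to
 * the end of a bare IPv6 header) across two fields: bits 0-1 and
 * bit 4 of the offset land in vlan_tag_flags, bits 2-3 in the upper
 * mss bits, and offset 0 takes a dedicated fast path.
 */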
5191 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5192 u32 tcp_off = skb_transport_offset(skb) -
5193 sizeof(struct ipv6hdr) - ETH_HLEN;
5195 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5196 TX_BD_FLAGS_SW_FLAGS;
5197 if (likely(tcp_off == 0))
5198 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5201 vlan_tag_flags |= ((tcp_off & 0x3) <<
5202 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5203 ((tcp_off & 0x10) <<
5204 TX_BD_FLAGS_TCP6_OFF4_SHL);
5205 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5208 if (skb_header_cloned(skb) &&
5209 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5211 return NETDEV_TX_OK;
5214 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5218 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5219 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5223 if (tcp_opt_len || (iph->ihl > 5)) {
5224 vlan_tag_flags |= ((iph->ihl - 5) +
5225 (tcp_opt_len >> 2)) << 8;
5231 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5233 tx_buf = &bp->tx_buf_ring[ring_prod];
5235 pci_unmap_addr_set(tx_buf, mapping, mapping);
5237 txbd = &bp->tx_desc_ring[ring_prod];
5239 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5240 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5241 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5242 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5244 last_frag = skb_shinfo(skb)->nr_frags;
5246 for (i = 0; i < last_frag; i++) {
5247 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5249 prod = NEXT_TX_BD(prod);
5250 ring_prod = TX_RING_IDX(prod);
5251 txbd = &bp->tx_desc_ring[ring_prod];
5254 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5255 len, PCI_DMA_TODEVICE);
5256 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5259 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5260 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5261 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5262 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5265 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5267 prod = NEXT_TX_BD(prod);
5268 bp->tx_prod_bseq += skb->len;
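/* Ring the doorbell: writing the new producer index and running byte
 * count tells the chip to start fetching the queued BDs.
 */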
5270 REG_WR16(bp, bp->tx_bidx_addr, prod);
5271 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5276 dev->trans_start = jiffies;
5278 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5279 netif_stop_queue(dev);
5280 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5281 netif_wake_queue(dev);
5284 return NETDEV_TX_OK;
5287 /* Called with rtnl_lock */
5289 bnx2_close(struct net_device *dev)
5291 struct bnx2 *bp = netdev_priv(dev);
5294 /* Calling flush_scheduled_work() may deadlock because
5295 * linkwatch_event() may be on the workqueue and it will try to get
5296 * the rtnl_lock which we are holding.
5297 */
5298 while (bp->in_reset_task)
5301 bnx2_disable_int_sync(bp);
5302 napi_disable(&bp->napi);
5303 del_timer_sync(&bp->timer);
5304 if (bp->flags & NO_WOL_FLAG)
5305 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5307 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5309 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5310 bnx2_reset_chip(bp, reset_code);
5315 netif_carrier_off(bp->dev);
5316 bnx2_set_power_state(bp, PCI_D3hot);
5320 #define GET_NET_STATS64(ctr) \
5321 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5322 (unsigned long) (ctr##_lo)
5324 #define GET_NET_STATS32(ctr) \
5325 (ctr##_lo)
5327 #if (BITS_PER_LONG == 64)
5328 #define GET_NET_STATS GET_NET_STATS64
5329 #else
5330 #define GET_NET_STATS GET_NET_STATS32
5331 #endif
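/* A rough sketch of what GET_NET_STATS64 expands to, assuming a
 * counter kept by the hardware as ctr_hi/ctr_lo 32-bit halves:
 *
 *	val = ((unsigned long) ctr_hi << 32) + ctr_lo;
 *
 * On 32-bit hosts GET_NET_STATS32 keeps only the low half, so large
 * counters can wrap in the reported value.
 */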
5333 static struct net_device_stats *
5334 bnx2_get_stats(struct net_device *dev)
5336 struct bnx2 *bp = netdev_priv(dev);
5337 struct statistics_block *stats_blk = bp->stats_blk;
5338 struct net_device_stats *net_stats = &bp->net_stats;
5340 if (bp->stats_blk == NULL) {
5343 net_stats->rx_packets =
5344 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5345 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5346 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5348 net_stats->tx_packets =
5349 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5350 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5351 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5353 net_stats->rx_bytes =
5354 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5356 net_stats->tx_bytes =
5357 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5359 net_stats->multicast =
5360 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5362 net_stats->collisions =
5363 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5365 net_stats->rx_length_errors =
5366 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5367 stats_blk->stat_EtherStatsOverrsizePkts);
5369 net_stats->rx_over_errors =
5370 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5372 net_stats->rx_frame_errors =
5373 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5375 net_stats->rx_crc_errors =
5376 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5378 net_stats->rx_errors = net_stats->rx_length_errors +
5379 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5380 net_stats->rx_crc_errors;
5382 net_stats->tx_aborted_errors =
5383 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5384 stats_blk->stat_Dot3StatsLateCollisions);
5386 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5387 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5388 net_stats->tx_carrier_errors = 0;
5390 net_stats->tx_carrier_errors =
5392 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5395 net_stats->tx_errors =
5397 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5399 net_stats->tx_aborted_errors +
5400 net_stats->tx_carrier_errors;
5402 net_stats->rx_missed_errors =
5403 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5404 stats_blk->stat_FwRxDrop);
5409 /* All ethtool functions called with rtnl_lock */
5412 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5414 struct bnx2 *bp = netdev_priv(dev);
5415 int support_serdes = 0, support_copper = 0;
5417 cmd->supported = SUPPORTED_Autoneg;
5418 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5421 } else if (bp->phy_port == PORT_FIBRE)
5426 if (support_serdes) {
5427 cmd->supported |= SUPPORTED_1000baseT_Full |
5429 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5430 cmd->supported |= SUPPORTED_2500baseX_Full;
5433 if (support_copper) {
5434 cmd->supported |= SUPPORTED_10baseT_Half |
5435 SUPPORTED_10baseT_Full |
5436 SUPPORTED_100baseT_Half |
5437 SUPPORTED_100baseT_Full |
5438 SUPPORTED_1000baseT_Full |
5443 spin_lock_bh(&bp->phy_lock);
5444 cmd->port = bp->phy_port;
5445 cmd->advertising = bp->advertising;
5447 if (bp->autoneg & AUTONEG_SPEED) {
5448 cmd->autoneg = AUTONEG_ENABLE;
5451 cmd->autoneg = AUTONEG_DISABLE;
5454 if (netif_carrier_ok(dev)) {
5455 cmd->speed = bp->line_speed;
5456 cmd->duplex = bp->duplex;
5462 spin_unlock_bh(&bp->phy_lock);
5464 cmd->transceiver = XCVR_INTERNAL;
5465 cmd->phy_address = bp->phy_addr;
5471 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5473 struct bnx2 *bp = netdev_priv(dev);
5474 u8 autoneg = bp->autoneg;
5475 u8 req_duplex = bp->req_duplex;
5476 u16 req_line_speed = bp->req_line_speed;
5477 u32 advertising = bp->advertising;
5480 spin_lock_bh(&bp->phy_lock);
5482 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5483 goto err_out_unlock;
5485 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5486 goto err_out_unlock;
5488 if (cmd->autoneg == AUTONEG_ENABLE) {
5489 autoneg |= AUTONEG_SPEED;
5491 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5493 /* allow advertising only one speed */
5494 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5495 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5496 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5497 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5499 if (cmd->port == PORT_FIBRE)
5500 goto err_out_unlock;
5502 advertising = cmd->advertising;
5504 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5505 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5506 (cmd->port == PORT_TP))
5507 goto err_out_unlock;
5508 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5509 advertising = cmd->advertising;
5510 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5511 goto err_out_unlock;
5513 if (cmd->port == PORT_FIBRE)
5514 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5516 advertising = ETHTOOL_ALL_COPPER_SPEED;
5518 advertising |= ADVERTISED_Autoneg;
5521 if (cmd->port == PORT_FIBRE) {
5522 if ((cmd->speed != SPEED_1000 &&
5523 cmd->speed != SPEED_2500) ||
5524 (cmd->duplex != DUPLEX_FULL))
5525 goto err_out_unlock;
5527 if (cmd->speed == SPEED_2500 &&
5528 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5529 goto err_out_unlock;
5531 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5532 goto err_out_unlock;
5534 autoneg &= ~AUTONEG_SPEED;
5535 req_line_speed = cmd->speed;
5536 req_duplex = cmd->duplex;
5540 bp->autoneg = autoneg;
5541 bp->advertising = advertising;
5542 bp->req_line_speed = req_line_speed;
5543 bp->req_duplex = req_duplex;
5545 err = bnx2_setup_phy(bp, cmd->port);
5548 spin_unlock_bh(&bp->phy_lock);
5554 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5556 struct bnx2 *bp = netdev_priv(dev);
5558 strcpy(info->driver, DRV_MODULE_NAME);
5559 strcpy(info->version, DRV_MODULE_VERSION);
5560 strcpy(info->bus_info, pci_name(bp->pdev));
5561 strcpy(info->fw_version, bp->fw_version);
5564 #define BNX2_REGDUMP_LEN (32 * 1024)
5567 bnx2_get_regs_len(struct net_device *dev)
5569 return BNX2_REGDUMP_LEN;
5573 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5575 u32 *p = _p, i, offset;
5577 struct bnx2 *bp = netdev_priv(dev);
5578 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5579 0x0800, 0x0880, 0x0c00, 0x0c10,
5580 0x0c30, 0x0d08, 0x1000, 0x101c,
5581 0x1040, 0x1048, 0x1080, 0x10a4,
5582 0x1400, 0x1490, 0x1498, 0x14f0,
5583 0x1500, 0x155c, 0x1580, 0x15dc,
5584 0x1600, 0x1658, 0x1680, 0x16d8,
5585 0x1800, 0x1820, 0x1840, 0x1854,
5586 0x1880, 0x1894, 0x1900, 0x1984,
5587 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5588 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5589 0x2000, 0x2030, 0x23c0, 0x2400,
5590 0x2800, 0x2820, 0x2830, 0x2850,
5591 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5592 0x3c00, 0x3c94, 0x4000, 0x4010,
5593 0x4080, 0x4090, 0x43c0, 0x4458,
5594 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5595 0x4fc0, 0x5010, 0x53c0, 0x5444,
5596 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5597 0x5fc0, 0x6000, 0x6400, 0x6428,
5598 0x6800, 0x6848, 0x684c, 0x6860,
5599 0x6888, 0x6910, 0x8000 };
5603 memset(p, 0, BNX2_REGDUMP_LEN);
5605 if (!netif_running(bp->dev))
5609 offset = reg_boundaries[0];
5611 while (offset < BNX2_REGDUMP_LEN) {
5612 *p++ = REG_RD(bp, offset);
5614 if (offset == reg_boundaries[i + 1]) {
5615 offset = reg_boundaries[i + 2];
5616 p = (u32 *) (orig_p + offset);
5623 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5625 struct bnx2 *bp = netdev_priv(dev);
5627 if (bp->flags & NO_WOL_FLAG) {
5632 wol->supported = WAKE_MAGIC;
5634 wol->wolopts = WAKE_MAGIC;
5638 memset(&wol->sopass, 0, sizeof(wol->sopass));
5642 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5644 struct bnx2 *bp = netdev_priv(dev);
5646 if (wol->wolopts & ~WAKE_MAGIC)
5649 if (wol->wolopts & WAKE_MAGIC) {
5650 if (bp->flags & NO_WOL_FLAG)
5662 bnx2_nway_reset(struct net_device *dev)
5664 struct bnx2 *bp = netdev_priv(dev);
5667 if (!(bp->autoneg & AUTONEG_SPEED)) {
5671 spin_lock_bh(&bp->phy_lock);
5673 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5676 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5677 spin_unlock_bh(&bp->phy_lock);
5681 /* Force a link down that is visible to the link partner */
5682 if (bp->phy_flags & PHY_SERDES_FLAG) {
5683 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5684 spin_unlock_bh(&bp->phy_lock);
5688 spin_lock_bh(&bp->phy_lock);
5690 bp->current_interval = SERDES_AN_TIMEOUT;
5691 bp->serdes_an_pending = 1;
5692 mod_timer(&bp->timer, jiffies + bp->current_interval);
5695 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5696 bmcr &= ~BMCR_LOOPBACK;
5697 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5699 spin_unlock_bh(&bp->phy_lock);
5705 bnx2_get_eeprom_len(struct net_device *dev)
5707 struct bnx2 *bp = netdev_priv(dev);
5709 if (bp->flash_info == NULL)
5712 return (int) bp->flash_size;
5716 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5719 struct bnx2 *bp = netdev_priv(dev);
5722 /* parameters already validated in ethtool_get_eeprom */
5724 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5730 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5733 struct bnx2 *bp = netdev_priv(dev);
5736 /* parameters already validated in ethtool_set_eeprom */
5738 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5744 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5746 struct bnx2 *bp = netdev_priv(dev);
5748 memset(coal, 0, sizeof(struct ethtool_coalesce));
5750 coal->rx_coalesce_usecs = bp->rx_ticks;
5751 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5752 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5753 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5755 coal->tx_coalesce_usecs = bp->tx_ticks;
5756 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5757 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5758 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5760 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5766 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5768 struct bnx2 *bp = netdev_priv(dev);
5770 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5771 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5773 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5774 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5776 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5777 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5779 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5780 if (bp->rx_quick_cons_trip_int > 0xff)
5781 bp->rx_quick_cons_trip_int = 0xff;
5783 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5784 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5786 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5787 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5789 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5790 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5792 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5793 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5794 0xff;
5796 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5797 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5798 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5799 bp->stats_ticks = USEC_PER_SEC;
5801 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5802 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5803 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5805 if (netif_running(bp->dev)) {
5806 bnx2_netif_stop(bp);
5808 bnx2_netif_start(bp);
5815 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5817 struct bnx2 *bp = netdev_priv(dev);
5819 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5820 ering->rx_mini_max_pending = 0;
5821 ering->rx_jumbo_max_pending = 0;
5823 ering->rx_pending = bp->rx_ring_size;
5824 ering->rx_mini_pending = 0;
5825 ering->rx_jumbo_pending = 0;
5827 ering->tx_max_pending = MAX_TX_DESC_CNT;
5828 ering->tx_pending = bp->tx_ring_size;
5832 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5834 struct bnx2 *bp = netdev_priv(dev);
5836 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5837 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5838 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5842 if (netif_running(bp->dev)) {
5843 bnx2_netif_stop(bp);
5844 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5849 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5850 bp->tx_ring_size = ering->tx_pending;
5852 if (netif_running(bp->dev)) {
5855 rc = bnx2_alloc_mem(bp);
5859 bnx2_netif_start(bp);
5866 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5868 struct bnx2 *bp = netdev_priv(dev);
5870 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5871 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5872 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5876 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5878 struct bnx2 *bp = netdev_priv(dev);
5880 bp->req_flow_ctrl = 0;
5881 if (epause->rx_pause)
5882 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5883 if (epause->tx_pause)
5884 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5886 if (epause->autoneg) {
5887 bp->autoneg |= AUTONEG_FLOW_CTRL;
5890 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5893 spin_lock_bh(&bp->phy_lock);
5895 bnx2_setup_phy(bp, bp->phy_port);
5897 spin_unlock_bh(&bp->phy_lock);
5903 bnx2_get_rx_csum(struct net_device *dev)
5905 struct bnx2 *bp = netdev_priv(dev);
5911 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5913 struct bnx2 *bp = netdev_priv(dev);
5920 bnx2_set_tso(struct net_device *dev, u32 data)
5922 struct bnx2 *bp = netdev_priv(dev);
5925 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5926 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5927 dev->features |= NETIF_F_TSO6;
5929 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5934 #define BNX2_NUM_STATS 46
5937 char string[ETH_GSTRING_LEN];
5938 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5940 { "rx_error_bytes" },
5942 { "tx_error_bytes" },
5943 { "rx_ucast_packets" },
5944 { "rx_mcast_packets" },
5945 { "rx_bcast_packets" },
5946 { "tx_ucast_packets" },
5947 { "tx_mcast_packets" },
5948 { "tx_bcast_packets" },
5949 { "tx_mac_errors" },
5950 { "tx_carrier_errors" },
5951 { "rx_crc_errors" },
5952 { "rx_align_errors" },
5953 { "tx_single_collisions" },
5954 { "tx_multi_collisions" },
5956 { "tx_excess_collisions" },
5957 { "tx_late_collisions" },
5958 { "tx_total_collisions" },
5961 { "rx_undersize_packets" },
5962 { "rx_oversize_packets" },
5963 { "rx_64_byte_packets" },
5964 { "rx_65_to_127_byte_packets" },
5965 { "rx_128_to_255_byte_packets" },
5966 { "rx_256_to_511_byte_packets" },
5967 { "rx_512_to_1023_byte_packets" },
5968 { "rx_1024_to_1522_byte_packets" },
5969 { "rx_1523_to_9022_byte_packets" },
5970 { "tx_64_byte_packets" },
5971 { "tx_65_to_127_byte_packets" },
5972 { "tx_128_to_255_byte_packets" },
5973 { "tx_256_to_511_byte_packets" },
5974 { "tx_512_to_1023_byte_packets" },
5975 { "tx_1024_to_1522_byte_packets" },
5976 { "tx_1523_to_9022_byte_packets" },
5977 { "rx_xon_frames" },
5978 { "rx_xoff_frames" },
5979 { "tx_xon_frames" },
5980 { "tx_xoff_frames" },
5981 { "rx_mac_ctrl_frames" },
5982 { "rx_filtered_packets" },
5984 { "rx_fw_discards" },
5987 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
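/* Offsets below are in 32-bit words into the statistics block,
 * matching the u32 * view used by bnx2_get_ethtool_stats().
 */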
5989 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5990 STATS_OFFSET32(stat_IfHCInOctets_hi),
5991 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5992 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5993 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5994 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5995 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5996 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5997 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5998 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5999 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6000 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6001 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6002 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6003 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6004 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6005 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6006 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6007 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6008 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6009 STATS_OFFSET32(stat_EtherStatsCollisions),
6010 STATS_OFFSET32(stat_EtherStatsFragments),
6011 STATS_OFFSET32(stat_EtherStatsJabbers),
6012 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6013 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6014 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6015 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6016 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6017 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6018 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6019 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6020 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6021 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6022 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6023 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6024 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6025 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6026 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6027 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6028 STATS_OFFSET32(stat_XonPauseFramesReceived),
6029 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6030 STATS_OFFSET32(stat_OutXonSent),
6031 STATS_OFFSET32(stat_OutXoffSent),
6032 STATS_OFFSET32(stat_MacControlFramesReceived),
6033 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6034 STATS_OFFSET32(stat_IfInMBUFDiscards),
6035 STATS_OFFSET32(stat_FwRxDrop),
6038 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6039 * skipped because of errata.
6040 */
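/* Per-counter widths for the arrays below: 8 = 64-bit counter (high
 * word first), 4 = 32-bit counter, 0 = counter not implemented on
 * this chip (reported as 0).
 */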
6041 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6042 8,0,8,8,8,8,8,8,8,8,
6043 4,0,4,4,4,4,4,4,4,4,
6044 4,4,4,4,4,4,4,4,4,4,
6045 4,4,4,4,4,4,4,4,4,4,
6049 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6050 8,0,8,8,8,8,8,8,8,8,
6051 4,4,4,4,4,4,4,4,4,4,
6052 4,4,4,4,4,4,4,4,4,4,
6053 4,4,4,4,4,4,4,4,4,4,
6057 #define BNX2_NUM_TESTS 6
6060 char string[ETH_GSTRING_LEN];
6061 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6062 { "register_test (offline)" },
6063 { "memory_test (offline)" },
6064 { "loopback_test (offline)" },
6065 { "nvram_test (online)" },
6066 { "interrupt_test (online)" },
6067 { "link_test (online)" },
6071 bnx2_self_test_count(struct net_device *dev)
6073 return BNX2_NUM_TESTS;
6077 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6079 struct bnx2 *bp = netdev_priv(dev);
6081 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6082 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6085 bnx2_netif_stop(bp);
6086 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6089 if (bnx2_test_registers(bp) != 0) {
6091 etest->flags |= ETH_TEST_FL_FAILED;
6093 if (bnx2_test_memory(bp) != 0) {
6095 etest->flags |= ETH_TEST_FL_FAILED;
6097 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6098 etest->flags |= ETH_TEST_FL_FAILED;
6100 if (!netif_running(bp->dev)) {
6101 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6105 bnx2_netif_start(bp);
6108 /* wait for link up */
6109 for (i = 0; i < 7; i++) {
6112 msleep_interruptible(1000);
6116 if (bnx2_test_nvram(bp) != 0) {
6118 etest->flags |= ETH_TEST_FL_FAILED;
6120 if (bnx2_test_intr(bp) != 0) {
6122 etest->flags |= ETH_TEST_FL_FAILED;
6125 if (bnx2_test_link(bp) != 0) {
6127 etest->flags |= ETH_TEST_FL_FAILED;
6133 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6135 switch (stringset) {
6137 memcpy(buf, bnx2_stats_str_arr,
6138 sizeof(bnx2_stats_str_arr));
6141 memcpy(buf, bnx2_tests_str_arr,
6142 sizeof(bnx2_tests_str_arr));
6148 bnx2_get_stats_count(struct net_device *dev)
6150 return BNX2_NUM_STATS;
6154 bnx2_get_ethtool_stats(struct net_device *dev,
6155 struct ethtool_stats *stats, u64 *buf)
6157 struct bnx2 *bp = netdev_priv(dev);
6159 u32 *hw_stats = (u32 *) bp->stats_blk;
6160 u8 *stats_len_arr = NULL;
6162 if (hw_stats == NULL) {
6163 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6167 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6168 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6169 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6170 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6171 stats_len_arr = bnx2_5706_stats_len_arr;
6173 stats_len_arr = bnx2_5708_stats_len_arr;
6175 for (i = 0; i < BNX2_NUM_STATS; i++) {
6176 if (stats_len_arr[i] == 0) {
6177 /* skip this counter */
6181 if (stats_len_arr[i] == 4) {
6182 /* 4-byte counter */
6184 *(hw_stats + bnx2_stats_offset_arr[i]);
6187 /* 8-byte counter */
6188 buf[i] = (((u64) *(hw_stats +
6189 bnx2_stats_offset_arr[i])) << 32) +
6190 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
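/* ethtool LED identify: blink the port LED for about data seconds by
 * toggling the EMAC LED override bits every 500 ms (a pending signal
 * aborts early), then restore the saved LED mode.
 */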
6195 bnx2_phys_id(struct net_device *dev, u32 data)
6197 struct bnx2 *bp = netdev_priv(dev);
6204 save = REG_RD(bp, BNX2_MISC_CFG);
6205 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6207 for (i = 0; i < (data * 2); i++) {
6209 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6212 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6213 BNX2_EMAC_LED_1000MB_OVERRIDE |
6214 BNX2_EMAC_LED_100MB_OVERRIDE |
6215 BNX2_EMAC_LED_10MB_OVERRIDE |
6216 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6217 BNX2_EMAC_LED_TRAFFIC);
6219 msleep_interruptible(500);
6220 if (signal_pending(current))
6223 REG_WR(bp, BNX2_EMAC_LED, 0);
6224 REG_WR(bp, BNX2_MISC_CFG, save);
6229 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6231 struct bnx2 *bp = netdev_priv(dev);
6233 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6234 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6236 return (ethtool_op_set_tx_csum(dev, data));
6239 static const struct ethtool_ops bnx2_ethtool_ops = {
6240 .get_settings = bnx2_get_settings,
6241 .set_settings = bnx2_set_settings,
6242 .get_drvinfo = bnx2_get_drvinfo,
6243 .get_regs_len = bnx2_get_regs_len,
6244 .get_regs = bnx2_get_regs,
6245 .get_wol = bnx2_get_wol,
6246 .set_wol = bnx2_set_wol,
6247 .nway_reset = bnx2_nway_reset,
6248 .get_link = ethtool_op_get_link,
6249 .get_eeprom_len = bnx2_get_eeprom_len,
6250 .get_eeprom = bnx2_get_eeprom,
6251 .set_eeprom = bnx2_set_eeprom,
6252 .get_coalesce = bnx2_get_coalesce,
6253 .set_coalesce = bnx2_set_coalesce,
6254 .get_ringparam = bnx2_get_ringparam,
6255 .set_ringparam = bnx2_set_ringparam,
6256 .get_pauseparam = bnx2_get_pauseparam,
6257 .set_pauseparam = bnx2_set_pauseparam,
6258 .get_rx_csum = bnx2_get_rx_csum,
6259 .set_rx_csum = bnx2_set_rx_csum,
6260 .get_tx_csum = ethtool_op_get_tx_csum,
6261 .set_tx_csum = bnx2_set_tx_csum,
6262 .get_sg = ethtool_op_get_sg,
6263 .set_sg = ethtool_op_set_sg,
6264 .get_tso = ethtool_op_get_tso,
6265 .set_tso = bnx2_set_tso,
6266 .self_test_count = bnx2_self_test_count,
6267 .self_test = bnx2_self_test,
6268 .get_strings = bnx2_get_strings,
6269 .phys_id = bnx2_phys_id,
6270 .get_stats_count = bnx2_get_stats_count,
6271 .get_ethtool_stats = bnx2_get_ethtool_stats,
6274 /* Called with rtnl_lock */
6276 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6278 struct mii_ioctl_data *data = if_mii(ifr);
6279 struct bnx2 *bp = netdev_priv(dev);
6284 data->phy_id = bp->phy_addr;
6290 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6293 if (!netif_running(dev))
6296 spin_lock_bh(&bp->phy_lock);
6297 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6298 spin_unlock_bh(&bp->phy_lock);
6300 data->val_out = mii_regval;
6306 if (!capable(CAP_NET_ADMIN))
6309 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6312 if (!netif_running(dev))
6315 spin_lock_bh(&bp->phy_lock);
6316 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6317 spin_unlock_bh(&bp->phy_lock);
6328 /* Called with rtnl_lock */
6330 bnx2_change_mac_addr(struct net_device *dev, void *p)
6332 struct sockaddr *addr = p;
6333 struct bnx2 *bp = netdev_priv(dev);
6335 if (!is_valid_ether_addr(addr->sa_data))
6338 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6339 if (netif_running(dev))
6340 bnx2_set_mac_addr(bp);
6345 /* Called with rtnl_lock */
6347 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6349 struct bnx2 *bp = netdev_priv(dev);
6351 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6352 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6356 if (netif_running(dev)) {
6357 bnx2_netif_stop(bp);
6361 bnx2_netif_start(bp);
6366 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6368 poll_bnx2(struct net_device *dev)
6370 struct bnx2 *bp = netdev_priv(dev);
6372 disable_irq(bp->pdev->irq);
6373 bnx2_interrupt(bp->pdev->irq, dev);
6374 enable_irq(bp->pdev->irq);
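/* Work out the 5709 media type: the bond ID identifies copper-only
 * and SerDes-only parts directly; dual-media parts are decoded from
 * the strap (or its override) together with the PCI function number.
 */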
6378 static void __devinit
6379 bnx2_get_5709_media(struct bnx2 *bp)
6381 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6382 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6385 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6387 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6388 bp->phy_flags |= PHY_SERDES_FLAG;
6392 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6393 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6395 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6397 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6402 bp->phy_flags |= PHY_SERDES_FLAG;
6410 bp->phy_flags |= PHY_SERDES_FLAG;
6416 static void __devinit
6417 bnx2_get_pci_speed(struct bnx2 *bp)
6421 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6422 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6425 bp->flags |= PCIX_FLAG;
6427 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6429 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6431 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6432 bp->bus_speed_mhz = 133;
6435 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6436 bp->bus_speed_mhz = 100;
6439 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6440 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6441 bp->bus_speed_mhz = 66;
6444 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6445 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6446 bp->bus_speed_mhz = 50;
6449 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6450 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6451 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6452 bp->bus_speed_mhz = 33;
6457 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6458 bp->bus_speed_mhz = 66;
6460 bp->bus_speed_mhz = 33;
6463 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6464 bp->flags |= PCI_32BIT_FLAG;
6468 static int __devinit
6469 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6472 unsigned long mem_len;
6475 u64 dma_mask, persist_dma_mask;
6477 SET_NETDEV_DEV(dev, &pdev->dev);
6478 bp = netdev_priv(dev);
6483 /* enable device (incl. PCI PM wakeup) and bus-mastering */
6484 rc = pci_enable_device(pdev);
6486 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6490 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6492 "Cannot find PCI device base address, aborting.\n");
6494 goto err_out_disable;
6497 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6499 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6500 goto err_out_disable;
6503 pci_set_master(pdev);
6505 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6506 if (bp->pm_cap == 0) {
6508 "Cannot find power management capability, aborting.\n");
6510 goto err_out_release;
6516 spin_lock_init(&bp->phy_lock);
6517 spin_lock_init(&bp->indirect_lock);
6518 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6520 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6521 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6522 dev->mem_end = dev->mem_start + mem_len;
6523 dev->irq = pdev->irq;
6525 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6528 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6530 goto err_out_release;
6533 /* Configure byte swap and enable write to the reg_window registers.
6534 * Rely on the CPU to do target byte swapping on big endian systems;
6535 * the chip's target access swapping will not swap all accesses.
6536 */
6537 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6538 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6539 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6541 bnx2_set_power_state(bp, PCI_D0);
6543 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6545 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6546 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6548 "Cannot find PCIE capability, aborting.\n");
6552 bp->flags |= PCIE_FLAG;
6554 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6555 if (bp->pcix_cap == 0) {
6557 "Cannot find PCIX capability, aborting.\n");
6563 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6564 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6565 bp->flags |= MSI_CAP_FLAG;
6568 /* 5708 cannot support DMA addresses > 40-bit. */
6569 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6570 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6572 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6574 /* Configure DMA attributes. */
6575 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6576 dev->features |= NETIF_F_HIGHDMA;
6577 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6580 "pci_set_consistent_dma_mask failed, aborting.\n");
6583 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6584 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6588 if (!(bp->flags & PCIE_FLAG))
6589 bnx2_get_pci_speed(bp);
6591 /* 5706A0 may falsely detect SERR and PERR. */
6592 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6593 reg = REG_RD(bp, PCI_COMMAND);
6594 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6595 REG_WR(bp, PCI_COMMAND, reg);
6597 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6598 !(bp->flags & PCIX_FLAG)) {
6601 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6605 bnx2_init_nvram(bp);
6607 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6609 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6610 BNX2_SHM_HDR_SIGNATURE_SIG) {
6611 u32 off = PCI_FUNC(pdev->devfn) << 2;
6613 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6615 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6617 /* Get the permanent MAC address. First we need to make sure the
6618 * firmware is actually running.
6619 */
6620 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6622 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6623 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6624 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6629 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
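/* Format the bootcode version as "x.y.z": the three most significant
 * bytes of BNX2_DEV_INFO_BC_REV are printed in decimal with leading
 * zeros suppressed.
 */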
6630 for (i = 0, j = 0; i < 3; i++) {
6633 num = (u8) (reg >> (24 - (i * 8)));
6634 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6635 if (num >= k || !skip0 || k == 1) {
6636 bp->fw_version[j++] = (num / k) + '0';
6641 bp->fw_version[j++] = '.';
6643 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
6644 BNX2_PORT_FEATURE_ASF_ENABLED) {
6645 bp->flags |= ASF_ENABLE_FLAG;
6647 for (i = 0; i < 30; i++) {
6648 reg = REG_RD_IND(bp, bp->shmem_base +
6649 BNX2_BC_STATE_CONDITION);
6650 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6655 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6656 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6657 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6658 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6660 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6662 bp->fw_version[j++] = ' ';
6663 for (i = 0; i < 3; i++) {
6664 reg = REG_RD_IND(bp, addr + i * 4);
6666 memcpy(&bp->fw_version[j], &reg, 4);
6671 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6672 bp->mac_addr[0] = (u8) (reg >> 8);
6673 bp->mac_addr[1] = (u8) reg;
6675 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6676 bp->mac_addr[2] = (u8) (reg >> 24);
6677 bp->mac_addr[3] = (u8) (reg >> 16);
6678 bp->mac_addr[4] = (u8) (reg >> 8);
6679 bp->mac_addr[5] = (u8) reg;
6681 bp->tx_ring_size = MAX_TX_DESC_CNT;
6682 bnx2_set_rx_ring_size(bp, 255);
6686 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6688 bp->tx_quick_cons_trip_int = 20;
6689 bp->tx_quick_cons_trip = 20;
6690 bp->tx_ticks_int = 80;
6693 bp->rx_quick_cons_trip_int = 6;
6694 bp->rx_quick_cons_trip = 6;
6695 bp->rx_ticks_int = 18;
6698 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6700 bp->timer_interval = HZ;
6701 bp->current_interval = HZ;
6705 /* Disable WOL support if we are running on a SERDES chip. */
6706 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6707 bnx2_get_5709_media(bp);
6708 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6709 bp->phy_flags |= PHY_SERDES_FLAG;
6711 bp->phy_port = PORT_TP;
6712 if (bp->phy_flags & PHY_SERDES_FLAG) {
6713 bp->phy_port = PORT_FIBRE;
6714 bp->flags |= NO_WOL_FLAG;
6715 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6717 reg = REG_RD_IND(bp, bp->shmem_base +
6718 BNX2_SHARED_HW_CFG_CONFIG);
6719 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6720 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6722 bnx2_init_remote_phy(bp);
6724 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6725 CHIP_NUM(bp) == CHIP_NUM_5708)
6726 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6727 else if (CHIP_ID(bp) == CHIP_ID_5709_A0 ||
6728 CHIP_ID(bp) == CHIP_ID_5709_A1)
6729 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6731 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6732 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6733 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6734 bp->flags |= NO_WOL_FLAG;
6736 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6737 bp->tx_quick_cons_trip_int =
6738 bp->tx_quick_cons_trip;
6739 bp->tx_ticks_int = bp->tx_ticks;
6740 bp->rx_quick_cons_trip_int =
6741 bp->rx_quick_cons_trip;
6742 bp->rx_ticks_int = bp->rx_ticks;
6743 bp->comp_prod_trip_int = bp->comp_prod_trip;
6744 bp->com_ticks_int = bp->com_ticks;
6745 bp->cmd_ticks_int = bp->cmd_ticks;
6748 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6750 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
6751 * with byte enables disabled on the unused 32-bit word. This is legal
6752 * but causes problems on the AMD 8132 which will eventually stop
6753 * responding after a while.
6755 * AMD believes this incompatibility is unique to the 5706, and
6756 * prefers to locally disable MSI rather than globally disabling it.
6757 */
6758 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6759 struct pci_dev *amd_8132 = NULL;
6761 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6762 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6765 if (amd_8132->revision >= 0x10 &&
6766 amd_8132->revision <= 0x13) {
6768 pci_dev_put(amd_8132);
6774 bnx2_set_default_link(bp);
6775 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6777 init_timer(&bp->timer);
6778 bp->timer.expires = RUN_AT(bp->timer_interval);
6779 bp->timer.data = (unsigned long) bp;
6780 bp->timer.function = bnx2_timer;
6786 iounmap(bp->regview);
6791 pci_release_regions(pdev);
6794 pci_disable_device(pdev);
6795 pci_set_drvdata(pdev, NULL);
6801 static char * __devinit
6802 bnx2_bus_string(struct bnx2 *bp, char *str)
6806 if (bp->flags & PCIE_FLAG) {
6807 s += sprintf(s, "PCI Express");
6809 s += sprintf(s, "PCI");
6810 if (bp->flags & PCIX_FLAG)
6811 s += sprintf(s, "-X");
6812 if (bp->flags & PCI_32BIT_FLAG)
6813 s += sprintf(s, " 32-bit");
6815 s += sprintf(s, " 64-bit");
6816 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6821 static int __devinit
6822 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6824 static int version_printed = 0;
6825 struct net_device *dev = NULL;
6830 if (version_printed++ == 0)
6831 printk(KERN_INFO "%s", version);
6833 /* dev is zeroed in alloc_etherdev */
6834 dev = alloc_etherdev(sizeof(*bp));
6839 rc = bnx2_init_board(pdev, dev);
6845 dev->open = bnx2_open;
6846 dev->hard_start_xmit = bnx2_start_xmit;
6847 dev->stop = bnx2_close;
6848 dev->get_stats = bnx2_get_stats;
6849 dev->set_multicast_list = bnx2_set_rx_mode;
6850 dev->do_ioctl = bnx2_ioctl;
6851 dev->set_mac_address = bnx2_change_mac_addr;
6852 dev->change_mtu = bnx2_change_mtu;
6853 dev->tx_timeout = bnx2_tx_timeout;
6854 dev->watchdog_timeo = TX_TIMEOUT;
6856 dev->vlan_rx_register = bnx2_vlan_rx_register;
6858 dev->ethtool_ops = &bnx2_ethtool_ops;
6860 bp = netdev_priv(dev);
6861 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
6863 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6864 dev->poll_controller = poll_bnx2;
6867 pci_set_drvdata(pdev, dev);
6869 memcpy(dev->dev_addr, bp->mac_addr, 6);
6870 memcpy(dev->perm_addr, bp->mac_addr, 6);
6871 bp->name = board_info[ent->driver_data].name;
6873 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6874 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6875 dev->features |= NETIF_F_IPV6_CSUM;
6878 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6880 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6881 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6882 dev->features |= NETIF_F_TSO6;
6884 if ((rc = register_netdev(dev))) {
6885 dev_err(&pdev->dev, "Cannot register net device\n");
6887 iounmap(bp->regview);
6888 pci_release_regions(pdev);
6889 pci_disable_device(pdev);
6890 pci_set_drvdata(pdev, NULL);
6895 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6899 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6900 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6901 bnx2_bus_string(bp, str),
6905 printk("node addr ");
6906 for (i = 0; i < 6; i++)
6907 printk("%2.2x", dev->dev_addr[i]);
6913 static void __devexit
6914 bnx2_remove_one(struct pci_dev *pdev)
6916 struct net_device *dev = pci_get_drvdata(pdev);
6917 struct bnx2 *bp = netdev_priv(dev);
6919 flush_scheduled_work();
6921 unregister_netdev(dev);
6924 iounmap(bp->regview);
6927 pci_release_regions(pdev);
6928 pci_disable_device(pdev);
6929 pci_set_drvdata(pdev, NULL);
6933 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6935 struct net_device *dev = pci_get_drvdata(pdev);
6936 struct bnx2 *bp = netdev_priv(dev);
6939 /* PCI register 4 needs to be saved whether netif_running() or not.
6940 * MSI address and data need to be saved if using MSI and
6941 * netif_running().
6942 */
6943 pci_save_state(pdev);
6944 if (!netif_running(dev))
6947 flush_scheduled_work();
6948 bnx2_netif_stop(bp);
6949 netif_device_detach(dev);
6950 del_timer_sync(&bp->timer);
6951 if (bp->flags & NO_WOL_FLAG)
6952 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6954 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6956 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6957 bnx2_reset_chip(bp, reset_code);
6959 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6964 bnx2_resume(struct pci_dev *pdev)
6966 struct net_device *dev = pci_get_drvdata(pdev);
6967 struct bnx2 *bp = netdev_priv(dev);
6969 pci_restore_state(pdev);
6970 if (!netif_running(dev))
6973 bnx2_set_power_state(bp, PCI_D0);
6974 netif_device_attach(dev);
6976 bnx2_netif_start(bp);
6980 static struct pci_driver bnx2_pci_driver = {
6981 .name = DRV_MODULE_NAME,
6982 .id_table = bnx2_pci_tbl,
6983 .probe = bnx2_init_one,
6984 .remove = __devexit_p(bnx2_remove_one),
6985 .suspend = bnx2_suspend,
6986 .resume = bnx2_resume,
6989 static int __init bnx2_init(void)
6991 return pci_register_driver(&bnx2_pci_driver);
6994 static void __exit bnx2_cleanup(void)
6996 pci_unregister_driver(&bnx2_pci_driver);
6999 module_init(bnx2_init);
7000 module_exit(bnx2_cleanup);