1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
58 #define DRV_MODULE_NAME "bnx2"
59 #define PFX DRV_MODULE_NAME ": "
60 #define DRV_MODULE_VERSION "1.5.1"
61 #define DRV_MODULE_RELDATE "November 15, 2006"
63 #define RUN_AT(x) (jiffies + (x))
65 /* Time in jiffies before concluding the transmitter is hung. */
66 #define TX_TIMEOUT (5*HZ)
68 static const char version[] __devinitdata =
69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_msi = 0;
78 module_param(disable_msi, int, 0);
79 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
106 static struct pci_device_id bnx2_pci_tbl[] = {
107 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
108 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
109 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
110 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
111 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
116 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
126 static struct flash_spec flash_table[] =
129 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
130 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
131 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 /* Expansion entry 0001 */
134 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
135 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
136 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 /* Saifun SA25F010 (non-buffered flash) */
139 /* strap, cfg1, & write1 need updates */
140 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
141 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
142 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
143 "Non-buffered flash (128kB)"},
144 /* Saifun SA25F020 (non-buffered flash) */
145 /* strap, cfg1, & write1 need updates */
146 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
149 "Non-buffered flash (256kB)"},
150 /* Expansion entry 0100 */
151 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
152 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
156 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
159 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
160 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
161 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
162 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
164 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
165 /* Saifun SA25F005 (non-buffered flash) */
166 /* strap, cfg1, & write1 need updates */
167 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
168 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
169 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
170 "Non-buffered flash (64kB)"},
172 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
173 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
174 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 /* Expansion entry 1001 */
177 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 /* Expansion entry 1010 */
182 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
183 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* ATMEL AT45DB011B (buffered flash) */
187 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
188 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
189 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
190 "Buffered flash (128kB)"},
191 /* Expansion entry 1100 */
192 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 /* Expansion entry 1101 */
197 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
198 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 	/* Atmel Expansion entry 1110 */
202 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1110 (Atmel)"},
206 /* ATMEL AT45DB021B (buffered flash) */
207 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
208 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
210 "Buffered flash (256kB)"},
213 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
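/* Number of free tx descriptors.  The producer/consumer difference is
 * taken on ring-local indices; a difference larger than MAX_TX_DESC_CNT
 * means the ring has wrapped, so it is re-masked and decremented by one
 * (the last entry of each ring page appears to be reserved for the
 * next-page pointer).
 */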
215 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
220 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
221 if (diff > MAX_TX_DESC_CNT)
222 diff = (diff & MAX_TX_DESC_CNT) - 1;
223 return (bp->tx_ring_size - diff);
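/* Indirect register access: write the target offset into the PCICFG
 * window address register, then read or write the data through the
 * window register itself.
 */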
227 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
229 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
230 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
234 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
236 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
237 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
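/* Context memory write.  On the 5709 the value goes through the
 * CTX_CTX_DATA/CTX_CTX_CTRL pair and the WRITE_REQ bit is polled until
 * the hardware clears it; earlier chips take the address and data
 * directly through CTX_DATA_ADR/CTX_DATA.
 */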
241 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
244 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
247 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
248 REG_WR(bp, BNX2_CTX_CTX_CTRL,
249 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
250 for (i = 0; i < 5; i++) {
252 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
253 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
258 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
259 REG_WR(bp, BNX2_CTX_DATA, val);
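/* MDIO read of a PHY register.  Hardware auto-polling is temporarily
 * disabled so it cannot race with the manual MDIO cycle, a read command
 * is issued through EMAC_MDIO_COMM, and START_BUSY is polled until the
 * cycle completes.  Auto-polling is restored on the way out.
 */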
264 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
269 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
270 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
271 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
273 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
274 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
279 val1 = (bp->phy_addr << 21) | (reg << 16) |
280 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
281 BNX2_EMAC_MDIO_COMM_START_BUSY;
282 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
284 for (i = 0; i < 50; i++) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
288 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
291 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
292 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
298 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
307 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
311 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
312 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
321 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
326 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
327 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
328 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
330 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
331 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
336 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
337 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
338 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
339 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
341 for (i = 0; i < 50; i++) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
345 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
351 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
370 bnx2_disable_int(struct bnx2 *bp)
372 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
373 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
374 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
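/* Re-enable interrupts.  The first write acks events up to
 * last_status_idx while still masked, the second unmasks, and the
 * final COAL_NOW kick presumably makes the host coalescing block raise
 * an interrupt for any work that arrived while interrupts were off.
 */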
378 bnx2_enable_int(struct bnx2 *bp)
380 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
381 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
382 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
384 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
385 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
387 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
391 bnx2_disable_int_sync(struct bnx2 *bp)
393 atomic_inc(&bp->intr_sem);
394 bnx2_disable_int(bp);
395 synchronize_irq(bp->pdev->irq);
399 bnx2_netif_stop(struct bnx2 *bp)
401 bnx2_disable_int_sync(bp);
402 if (netif_running(bp->dev)) {
403 netif_poll_disable(bp->dev);
404 netif_tx_disable(bp->dev);
405 bp->dev->trans_start = jiffies; /* prevent tx timeout */
410 bnx2_netif_start(struct bnx2 *bp)
412 if (atomic_dec_and_test(&bp->intr_sem)) {
413 if (netif_running(bp->dev)) {
414 netif_wake_queue(bp->dev);
415 netif_poll_enable(bp->dev);
422 bnx2_free_mem(struct bnx2 *bp)
426 for (i = 0; i < bp->ctx_pages; i++) {
427 if (bp->ctx_blk[i]) {
428 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
430 bp->ctx_blk_mapping[i]);
431 bp->ctx_blk[i] = NULL;
434 if (bp->status_blk) {
435 pci_free_consistent(bp->pdev, bp->status_stats_size,
436 bp->status_blk, bp->status_blk_mapping);
437 bp->status_blk = NULL;
438 bp->stats_blk = NULL;
440 if (bp->tx_desc_ring) {
441 pci_free_consistent(bp->pdev,
442 sizeof(struct tx_bd) * TX_DESC_CNT,
443 bp->tx_desc_ring, bp->tx_desc_mapping);
444 bp->tx_desc_ring = NULL;
446 kfree(bp->tx_buf_ring);
447 bp->tx_buf_ring = NULL;
448 for (i = 0; i < bp->rx_max_ring; i++) {
449 if (bp->rx_desc_ring[i])
450 pci_free_consistent(bp->pdev,
451 sizeof(struct rx_bd) * RX_DESC_CNT,
453 bp->rx_desc_mapping[i]);
454 bp->rx_desc_ring[i] = NULL;
456 vfree(bp->rx_buf_ring);
457 bp->rx_buf_ring = NULL;
461 bnx2_alloc_mem(struct bnx2 *bp)
463 int i, status_blk_size;
465 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
467 if (bp->tx_buf_ring == NULL)
470 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
471 sizeof(struct tx_bd) *
473 &bp->tx_desc_mapping);
474 if (bp->tx_desc_ring == NULL)
477 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
479 if (bp->rx_buf_ring == NULL)
482 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
485 for (i = 0; i < bp->rx_max_ring; i++) {
486 bp->rx_desc_ring[i] =
487 pci_alloc_consistent(bp->pdev,
488 sizeof(struct rx_bd) * RX_DESC_CNT,
489 &bp->rx_desc_mapping[i]);
490 if (bp->rx_desc_ring[i] == NULL)
495 /* Combine status and statistics blocks into one allocation. */
496 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
497 bp->status_stats_size = status_blk_size +
498 sizeof(struct statistics_block);
500 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
501 &bp->status_blk_mapping);
502 if (bp->status_blk == NULL)
505 memset(bp->status_blk, 0, bp->status_stats_size);
507 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
510 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
512 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
513 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
514 if (bp->ctx_pages == 0)
516 for (i = 0; i < bp->ctx_pages; i++) {
517 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
519 &bp->ctx_blk_mapping[i]);
520 if (bp->ctx_blk[i] == NULL)
532 bnx2_report_fw_link(struct bnx2 *bp)
534 u32 fw_link_status = 0;
539 switch (bp->line_speed) {
541 if (bp->duplex == DUPLEX_HALF)
542 fw_link_status = BNX2_LINK_STATUS_10HALF;
544 fw_link_status = BNX2_LINK_STATUS_10FULL;
547 if (bp->duplex == DUPLEX_HALF)
548 fw_link_status = BNX2_LINK_STATUS_100HALF;
550 fw_link_status = BNX2_LINK_STATUS_100FULL;
553 if (bp->duplex == DUPLEX_HALF)
554 fw_link_status = BNX2_LINK_STATUS_1000HALF;
556 fw_link_status = BNX2_LINK_STATUS_1000FULL;
559 if (bp->duplex == DUPLEX_HALF)
560 fw_link_status = BNX2_LINK_STATUS_2500HALF;
562 fw_link_status = BNX2_LINK_STATUS_2500FULL;
566 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
569 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
571 bnx2_read_phy(bp, MII_BMSR, &bmsr);
572 bnx2_read_phy(bp, MII_BMSR, &bmsr);
574 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
575 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
576 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
578 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
582 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
584 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
588 bnx2_report_link(struct bnx2 *bp)
591 netif_carrier_on(bp->dev);
592 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
594 printk("%d Mbps ", bp->line_speed);
596 if (bp->duplex == DUPLEX_FULL)
597 printk("full duplex");
599 printk("half duplex");
602 if (bp->flow_ctrl & FLOW_CTRL_RX) {
603 printk(", receive ");
604 if (bp->flow_ctrl & FLOW_CTRL_TX)
605 printk("& transmit ");
608 printk(", transmit ");
610 printk("flow control ON");
615 netif_carrier_off(bp->dev);
616 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
619 bnx2_report_fw_link(bp);
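/* Resolve tx/rx flow control from the local and remote pause
 * advertisements.  On 5708 SerDes the resolved result can be read back
 * directly from the 1000X status register; otherwise the 1000X pause
 * bits are first translated to their copper equivalents and the
 * standard 802.3 resolution table is applied below.
 */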
623 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
625 u32 local_adv, remote_adv;
628 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
629 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
631 if (bp->duplex == DUPLEX_FULL) {
632 bp->flow_ctrl = bp->req_flow_ctrl;
637 if (bp->duplex != DUPLEX_FULL) {
641 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
642 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
645 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
646 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
647 bp->flow_ctrl |= FLOW_CTRL_TX;
648 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
649 bp->flow_ctrl |= FLOW_CTRL_RX;
653 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
654 bnx2_read_phy(bp, MII_LPA, &remote_adv);
656 if (bp->phy_flags & PHY_SERDES_FLAG) {
657 u32 new_local_adv = 0;
658 u32 new_remote_adv = 0;
660 if (local_adv & ADVERTISE_1000XPAUSE)
661 new_local_adv |= ADVERTISE_PAUSE_CAP;
662 if (local_adv & ADVERTISE_1000XPSE_ASYM)
663 new_local_adv |= ADVERTISE_PAUSE_ASYM;
664 if (remote_adv & ADVERTISE_1000XPAUSE)
665 new_remote_adv |= ADVERTISE_PAUSE_CAP;
666 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
667 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
669 local_adv = new_local_adv;
670 remote_adv = new_remote_adv;
673 /* See Table 28B-3 of 802.3ab-1999 spec. */
674 if (local_adv & ADVERTISE_PAUSE_CAP) {
675 if(local_adv & ADVERTISE_PAUSE_ASYM) {
676 if (remote_adv & ADVERTISE_PAUSE_CAP) {
677 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
679 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
680 bp->flow_ctrl = FLOW_CTRL_RX;
684 if (remote_adv & ADVERTISE_PAUSE_CAP) {
685 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
689 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
690 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
691 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
693 bp->flow_ctrl = FLOW_CTRL_TX;
699 bnx2_5708s_linkup(struct bnx2 *bp)
704 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
705 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
706 case BCM5708S_1000X_STAT1_SPEED_10:
707 bp->line_speed = SPEED_10;
709 case BCM5708S_1000X_STAT1_SPEED_100:
710 bp->line_speed = SPEED_100;
712 case BCM5708S_1000X_STAT1_SPEED_1G:
713 bp->line_speed = SPEED_1000;
715 case BCM5708S_1000X_STAT1_SPEED_2G5:
716 bp->line_speed = SPEED_2500;
719 if (val & BCM5708S_1000X_STAT1_FD)
720 bp->duplex = DUPLEX_FULL;
722 bp->duplex = DUPLEX_HALF;
728 bnx2_5706s_linkup(struct bnx2 *bp)
730 u32 bmcr, local_adv, remote_adv, common;
733 bp->line_speed = SPEED_1000;
735 bnx2_read_phy(bp, MII_BMCR, &bmcr);
736 if (bmcr & BMCR_FULLDPLX) {
737 bp->duplex = DUPLEX_FULL;
740 bp->duplex = DUPLEX_HALF;
743 if (!(bmcr & BMCR_ANENABLE)) {
747 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
748 bnx2_read_phy(bp, MII_LPA, &remote_adv);
750 common = local_adv & remote_adv;
751 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
753 if (common & ADVERTISE_1000XFULL) {
754 bp->duplex = DUPLEX_FULL;
757 bp->duplex = DUPLEX_HALF;
765 bnx2_copper_linkup(struct bnx2 *bp)
769 bnx2_read_phy(bp, MII_BMCR, &bmcr);
770 if (bmcr & BMCR_ANENABLE) {
771 u32 local_adv, remote_adv, common;
773 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
774 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
776 common = local_adv & (remote_adv >> 2);
777 if (common & ADVERTISE_1000FULL) {
778 bp->line_speed = SPEED_1000;
779 bp->duplex = DUPLEX_FULL;
781 else if (common & ADVERTISE_1000HALF) {
782 bp->line_speed = SPEED_1000;
783 bp->duplex = DUPLEX_HALF;
786 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
787 bnx2_read_phy(bp, MII_LPA, &remote_adv);
789 common = local_adv & remote_adv;
790 if (common & ADVERTISE_100FULL) {
791 bp->line_speed = SPEED_100;
792 bp->duplex = DUPLEX_FULL;
794 else if (common & ADVERTISE_100HALF) {
795 bp->line_speed = SPEED_100;
796 bp->duplex = DUPLEX_HALF;
798 else if (common & ADVERTISE_10FULL) {
799 bp->line_speed = SPEED_10;
800 bp->duplex = DUPLEX_FULL;
802 else if (common & ADVERTISE_10HALF) {
803 bp->line_speed = SPEED_10;
804 bp->duplex = DUPLEX_HALF;
813 if (bmcr & BMCR_SPEED100) {
814 bp->line_speed = SPEED_100;
817 bp->line_speed = SPEED_10;
819 if (bmcr & BMCR_FULLDPLX) {
820 bp->duplex = DUPLEX_FULL;
823 bp->duplex = DUPLEX_HALF;
831 bnx2_set_mac_link(struct bnx2 *bp)
835 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
836 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
837 (bp->duplex == DUPLEX_HALF)) {
838 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
841 /* Configure the EMAC mode register. */
842 val = REG_RD(bp, BNX2_EMAC_MODE);
844 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
845 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
846 BNX2_EMAC_MODE_25G_MODE);
849 switch (bp->line_speed) {
851 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
852 val |= BNX2_EMAC_MODE_PORT_MII_10M;
857 val |= BNX2_EMAC_MODE_PORT_MII;
860 val |= BNX2_EMAC_MODE_25G_MODE;
863 val |= BNX2_EMAC_MODE_PORT_GMII;
868 val |= BNX2_EMAC_MODE_PORT_GMII;
871 /* Set the MAC to operate in the appropriate duplex mode. */
872 if (bp->duplex == DUPLEX_HALF)
873 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
874 REG_WR(bp, BNX2_EMAC_MODE, val);
876 /* Enable/disable rx PAUSE. */
877 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
879 if (bp->flow_ctrl & FLOW_CTRL_RX)
880 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
881 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
883 /* Enable/disable tx PAUSE. */
884 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
885 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
887 if (bp->flow_ctrl & FLOW_CTRL_TX)
888 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
889 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
891 /* Acknowledge the interrupt. */
892 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
898 bnx2_set_link(struct bnx2 *bp)
903 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
908 link_up = bp->link_up;
910 bnx2_read_phy(bp, MII_BMSR, &bmsr);
911 bnx2_read_phy(bp, MII_BMSR, &bmsr);
913 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
914 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
917 val = REG_RD(bp, BNX2_EMAC_STATUS);
918 if (val & BNX2_EMAC_STATUS_LINK)
919 bmsr |= BMSR_LSTATUS;
921 bmsr &= ~BMSR_LSTATUS;
924 if (bmsr & BMSR_LSTATUS) {
927 if (bp->phy_flags & PHY_SERDES_FLAG) {
928 if (CHIP_NUM(bp) == CHIP_NUM_5706)
929 bnx2_5706s_linkup(bp);
930 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
931 bnx2_5708s_linkup(bp);
934 bnx2_copper_linkup(bp);
936 bnx2_resolve_flow_ctrl(bp);
939 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
940 (bp->autoneg & AUTONEG_SPEED)) {
944 bnx2_read_phy(bp, MII_BMCR, &bmcr);
945 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
946 if (!(bmcr & BMCR_ANENABLE)) {
947 bnx2_write_phy(bp, MII_BMCR, bmcr |
951 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
955 if (bp->link_up != link_up) {
956 bnx2_report_link(bp);
959 bnx2_set_mac_link(bp);
965 bnx2_reset_phy(struct bnx2 *bp)
970 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
972 #define PHY_RESET_MAX_WAIT 100
973 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
976 		bnx2_read_phy(bp, MII_BMCR, &reg);
977 if (!(reg & BMCR_RESET)) {
982 if (i == PHY_RESET_MAX_WAIT) {
989 bnx2_phy_get_pause_adv(struct bnx2 *bp)
993 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
994 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
996 if (bp->phy_flags & PHY_SERDES_FLAG) {
997 adv = ADVERTISE_1000XPAUSE;
1000 adv = ADVERTISE_PAUSE_CAP;
1003 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1004 if (bp->phy_flags & PHY_SERDES_FLAG) {
1005 adv = ADVERTISE_1000XPSE_ASYM;
1008 adv = ADVERTISE_PAUSE_ASYM;
1011 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1012 if (bp->phy_flags & PHY_SERDES_FLAG) {
1013 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1016 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1023 bnx2_setup_serdes_phy(struct bnx2 *bp)
1028 if (!(bp->autoneg & AUTONEG_SPEED)) {
1030 int force_link_down = 0;
1032 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1033 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1035 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1036 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
1037 new_bmcr |= BMCR_SPEED1000;
1038 if (bp->req_line_speed == SPEED_2500) {
1039 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1040 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1041 if (!(up1 & BCM5708S_UP1_2G5)) {
1042 up1 |= BCM5708S_UP1_2G5;
1043 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1044 force_link_down = 1;
1046 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1047 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1048 if (up1 & BCM5708S_UP1_2G5) {
1049 up1 &= ~BCM5708S_UP1_2G5;
1050 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1051 force_link_down = 1;
1055 if (bp->req_duplex == DUPLEX_FULL) {
1056 adv |= ADVERTISE_1000XFULL;
1057 new_bmcr |= BMCR_FULLDPLX;
1060 adv |= ADVERTISE_1000XHALF;
1061 new_bmcr &= ~BMCR_FULLDPLX;
1063 if ((new_bmcr != bmcr) || (force_link_down)) {
1064 /* Force a link down visible on the other side */
1066 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1067 ~(ADVERTISE_1000XFULL |
1068 ADVERTISE_1000XHALF));
1069 bnx2_write_phy(bp, MII_BMCR, bmcr |
1070 BMCR_ANRESTART | BMCR_ANENABLE);
1073 netif_carrier_off(bp->dev);
1074 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1075 bnx2_report_link(bp);
1077 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1078 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1083 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1084 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1085 up1 |= BCM5708S_UP1_2G5;
1086 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1089 if (bp->advertising & ADVERTISED_1000baseT_Full)
1090 new_adv |= ADVERTISE_1000XFULL;
1092 new_adv |= bnx2_phy_get_pause_adv(bp);
1094 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1095 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1097 bp->serdes_an_pending = 0;
1098 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1099 /* Force a link down visible on the other side */
1101 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1102 spin_unlock_bh(&bp->phy_lock);
1104 spin_lock_bh(&bp->phy_lock);
1107 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1108 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1110 /* Speed up link-up time when the link partner
1111 * does not autonegotiate which is very common
1112 * in blade servers. Some blade servers use
1113 		 * IPMI for keyboard input and it's important
1114 * to minimize link disruptions. Autoneg. involves
1115 * exchanging base pages plus 3 next pages and
1116 * normally completes in about 120 msec.
1118 bp->current_interval = SERDES_AN_TIMEOUT;
1119 bp->serdes_an_pending = 1;
1120 mod_timer(&bp->timer, jiffies + bp->current_interval);
1126 #define ETHTOOL_ALL_FIBRE_SPEED \
1127 (ADVERTISED_1000baseT_Full)
1129 #define ETHTOOL_ALL_COPPER_SPEED \
1130 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1131 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1132 ADVERTISED_1000baseT_Full)
1134 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1135 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1137 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1140 bnx2_setup_copper_phy(struct bnx2 *bp)
1145 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1147 if (bp->autoneg & AUTONEG_SPEED) {
1148 u32 adv_reg, adv1000_reg;
1149 u32 new_adv_reg = 0;
1150 u32 new_adv1000_reg = 0;
1152 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1153 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1154 ADVERTISE_PAUSE_ASYM);
1156 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1157 adv1000_reg &= PHY_ALL_1000_SPEED;
1159 if (bp->advertising & ADVERTISED_10baseT_Half)
1160 new_adv_reg |= ADVERTISE_10HALF;
1161 if (bp->advertising & ADVERTISED_10baseT_Full)
1162 new_adv_reg |= ADVERTISE_10FULL;
1163 if (bp->advertising & ADVERTISED_100baseT_Half)
1164 new_adv_reg |= ADVERTISE_100HALF;
1165 if (bp->advertising & ADVERTISED_100baseT_Full)
1166 new_adv_reg |= ADVERTISE_100FULL;
1167 if (bp->advertising & ADVERTISED_1000baseT_Full)
1168 new_adv1000_reg |= ADVERTISE_1000FULL;
1170 new_adv_reg |= ADVERTISE_CSMA;
1172 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1174 if ((adv1000_reg != new_adv1000_reg) ||
1175 (adv_reg != new_adv_reg) ||
1176 ((bmcr & BMCR_ANENABLE) == 0)) {
1178 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1179 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1180 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1183 else if (bp->link_up) {
1184 /* Flow ctrl may have changed from auto to forced */
1185 /* or vice-versa. */
1187 bnx2_resolve_flow_ctrl(bp);
1188 bnx2_set_mac_link(bp);
1194 if (bp->req_line_speed == SPEED_100) {
1195 new_bmcr |= BMCR_SPEED100;
1197 if (bp->req_duplex == DUPLEX_FULL) {
1198 new_bmcr |= BMCR_FULLDPLX;
1200 if (new_bmcr != bmcr) {
1203 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1204 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1206 if (bmsr & BMSR_LSTATUS) {
1207 /* Force link down */
1208 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1209 spin_unlock_bh(&bp->phy_lock);
1211 spin_lock_bh(&bp->phy_lock);
1213 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1214 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1217 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1219 /* Normally, the new speed is setup after the link has
1220 * gone down and up again. In some cases, link will not go
1221 * down so we need to set up the new speed here.
1223 if (bmsr & BMSR_LSTATUS) {
1224 bp->line_speed = bp->req_line_speed;
1225 bp->duplex = bp->req_duplex;
1226 bnx2_resolve_flow_ctrl(bp);
1227 bnx2_set_mac_link(bp);
1234 bnx2_setup_phy(struct bnx2 *bp)
1236 if (bp->loopback == MAC_LOOPBACK)
1239 if (bp->phy_flags & PHY_SERDES_FLAG) {
1240 return (bnx2_setup_serdes_phy(bp));
1243 return (bnx2_setup_copper_phy(bp));
1248 bnx2_init_5708s_phy(struct bnx2 *bp)
1252 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1253 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1254 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1256 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1257 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1258 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1260 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1261 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1262 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1264 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1265 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1266 val |= BCM5708S_UP1_2G5;
1267 bnx2_write_phy(bp, BCM5708S_UP1, val);
1270 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1271 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1272 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1273 /* increase tx signal amplitude */
1274 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1275 BCM5708S_BLK_ADDR_TX_MISC);
1276 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1277 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1278 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1279 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1282 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1283 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1288 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1289 BNX2_SHARED_HW_CFG_CONFIG);
1290 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1291 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1292 BCM5708S_BLK_ADDR_TX_MISC);
1293 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1294 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1295 BCM5708S_BLK_ADDR_DIG);
1302 bnx2_init_5706s_phy(struct bnx2 *bp)
1304 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1306 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1307 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1309 if (bp->dev->mtu > 1500) {
1312 /* Set extended packet length bit */
1313 bnx2_write_phy(bp, 0x18, 0x7);
1314 bnx2_read_phy(bp, 0x18, &val);
1315 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1317 bnx2_write_phy(bp, 0x1c, 0x6c00);
1318 bnx2_read_phy(bp, 0x1c, &val);
1319 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1324 bnx2_write_phy(bp, 0x18, 0x7);
1325 bnx2_read_phy(bp, 0x18, &val);
1326 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1328 bnx2_write_phy(bp, 0x1c, 0x6c00);
1329 bnx2_read_phy(bp, 0x1c, &val);
1330 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1337 bnx2_init_copper_phy(struct bnx2 *bp)
1341 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1343 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1344 bnx2_write_phy(bp, 0x18, 0x0c00);
1345 bnx2_write_phy(bp, 0x17, 0x000a);
1346 bnx2_write_phy(bp, 0x15, 0x310b);
1347 bnx2_write_phy(bp, 0x17, 0x201f);
1348 bnx2_write_phy(bp, 0x15, 0x9506);
1349 bnx2_write_phy(bp, 0x17, 0x401f);
1350 bnx2_write_phy(bp, 0x15, 0x14e2);
1351 bnx2_write_phy(bp, 0x18, 0x0400);
1354 if (bp->dev->mtu > 1500) {
1355 /* Set extended packet length bit */
1356 bnx2_write_phy(bp, 0x18, 0x7);
1357 bnx2_read_phy(bp, 0x18, &val);
1358 bnx2_write_phy(bp, 0x18, val | 0x4000);
1360 bnx2_read_phy(bp, 0x10, &val);
1361 bnx2_write_phy(bp, 0x10, val | 0x1);
1364 bnx2_write_phy(bp, 0x18, 0x7);
1365 bnx2_read_phy(bp, 0x18, &val);
1366 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1368 bnx2_read_phy(bp, 0x10, &val);
1369 bnx2_write_phy(bp, 0x10, val & ~0x1);
1372 /* ethernet@wirespeed */
1373 bnx2_write_phy(bp, 0x18, 0x7007);
1374 bnx2_read_phy(bp, 0x18, &val);
1375 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1381 bnx2_init_phy(struct bnx2 *bp)
1386 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1387 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1389 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1393 bnx2_read_phy(bp, MII_PHYSID1, &val);
1394 bp->phy_id = val << 16;
1395 bnx2_read_phy(bp, MII_PHYSID2, &val);
1396 bp->phy_id |= val & 0xffff;
1398 if (bp->phy_flags & PHY_SERDES_FLAG) {
1399 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1400 rc = bnx2_init_5706s_phy(bp);
1401 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1402 rc = bnx2_init_5708s_phy(bp);
1405 rc = bnx2_init_copper_phy(bp);
1414 bnx2_set_mac_loopback(struct bnx2 *bp)
1418 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1419 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1420 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1421 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1426 static int bnx2_test_link(struct bnx2 *);
1429 bnx2_set_phy_loopback(struct bnx2 *bp)
1434 spin_lock_bh(&bp->phy_lock);
1435 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1437 spin_unlock_bh(&bp->phy_lock);
1441 for (i = 0; i < 10; i++) {
1442 if (bnx2_test_link(bp) == 0)
1447 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1448 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1449 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1450 BNX2_EMAC_MODE_25G_MODE);
1452 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1453 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
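/* Driver/bootcode handshake.  A sequence-numbered message is written
 * to the shared-memory DRV_MB mailbox and the FW_MB acknowledgement is
 * polled; WAIT0 messages skip the timeout handling, otherwise the
 * firmware is additionally informed of a timeout with a FW_TIMEOUT
 * message.
 */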
1459 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1465 msg_data |= bp->fw_wr_seq;
1467 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1469 /* wait for an acknowledgement. */
1470 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1473 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1475 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1478 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1481 /* If we timed out, inform the firmware that this is the case. */
1482 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1484 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1487 msg_data &= ~BNX2_DRV_MSG_CODE;
1488 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1490 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1495 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1502 bnx2_init_5709_context(struct bnx2 *bp)
1507 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1508 val |= (BCM_PAGE_BITS - 8) << 16;
1509 REG_WR(bp, BNX2_CTX_COMMAND, val);
1510 for (i = 0; i < bp->ctx_pages; i++) {
1513 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1514 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1515 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1516 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1517 (u64) bp->ctx_blk_mapping[i] >> 32);
1518 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1519 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1520 for (j = 0; j < 10; j++) {
1522 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1523 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1527 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1536 bnx2_init_context(struct bnx2 *bp)
1542 u32 vcid_addr, pcid_addr, offset;
1546 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1549 vcid_addr = GET_PCID_ADDR(vcid);
1551 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1556 pcid_addr = GET_PCID_ADDR(new_vcid);
1559 vcid_addr = GET_CID_ADDR(vcid);
1560 pcid_addr = vcid_addr;
1563 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1564 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1566 /* Zero out the context. */
1567 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1568 CTX_WR(bp, 0x00, offset, 0);
1571 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1572 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
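/* Work around bad on-chip rx buffer memory (apparently needed on early
 * 5706 steppings): allocate every free mbuf cluster from the firmware,
 * remember the ones without the bad-block bit (bit 9) set, and free
 * only those back, leaving the bad blocks permanently claimed.
 */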
1577 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1583 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1584 if (good_mbuf == NULL) {
1585 printk(KERN_ERR PFX "Failed to allocate memory in "
1586 "bnx2_alloc_bad_rbuf\n");
1590 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1591 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1595 /* Allocate a bunch of mbufs and save the good ones in an array. */
1596 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1597 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1598 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1600 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1602 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1604 /* The addresses with Bit 9 set are bad memory blocks. */
1605 if (!(val & (1 << 9))) {
1606 good_mbuf[good_mbuf_cnt] = (u16) val;
1610 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1613 /* Free the good ones back to the mbuf pool thus discarding
1614 * all the bad ones. */
1615 while (good_mbuf_cnt) {
1618 val = good_mbuf[good_mbuf_cnt];
1619 val = (val << 9) | val | 1;
1621 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1628 bnx2_set_mac_addr(struct bnx2 *bp)
1631 u8 *mac_addr = bp->dev->dev_addr;
1633 val = (mac_addr[0] << 8) | mac_addr[1];
1635 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1637 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1638 (mac_addr[4] << 8) | mac_addr[5];
1640 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
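/* Allocate, align and DMA-map a fresh rx skb and point the
 * corresponding rx buffer descriptor at its mapping.
 */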
1644 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1646 struct sk_buff *skb;
1647 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1649 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1650 unsigned long align;
1652 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1657 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1658 skb_reserve(skb, BNX2_RX_ALIGN - align);
1660 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1661 PCI_DMA_FROMDEVICE);
1664 pci_unmap_addr_set(rx_buf, mapping, mapping);
1666 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1667 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1669 bp->rx_prod_bseq += bp->rx_buf_use_size;
1675 bnx2_phy_int(struct bnx2 *bp)
1677 u32 new_link_state, old_link_state;
1679 new_link_state = bp->status_blk->status_attn_bits &
1680 STATUS_ATTN_BITS_LINK_STATE;
1681 old_link_state = bp->status_blk->status_attn_bits_ack &
1682 STATUS_ATTN_BITS_LINK_STATE;
1683 if (new_link_state != old_link_state) {
1684 if (new_link_state) {
1685 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1686 STATUS_ATTN_BITS_LINK_STATE);
1689 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1690 STATUS_ATTN_BITS_LINK_STATE);
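/* Reclaim tx buffers up to the hardware consumer index, unmapping the
 * head and every page fragment, then wake the queue once enough
 * descriptors are free again.
 */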
1697 bnx2_tx_int(struct bnx2 *bp)
1699 struct status_block *sblk = bp->status_blk;
1700 u16 hw_cons, sw_cons, sw_ring_cons;
1703 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1704 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1707 sw_cons = bp->tx_cons;
1709 while (sw_cons != hw_cons) {
1710 struct sw_bd *tx_buf;
1711 struct sk_buff *skb;
1714 sw_ring_cons = TX_RING_IDX(sw_cons);
1716 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1719 /* partial BD completions possible with TSO packets */
1720 if (skb_is_gso(skb)) {
1721 u16 last_idx, last_ring_idx;
1723 last_idx = sw_cons +
1724 skb_shinfo(skb)->nr_frags + 1;
1725 last_ring_idx = sw_ring_cons +
1726 skb_shinfo(skb)->nr_frags + 1;
1727 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1730 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1735 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1736 skb_headlen(skb), PCI_DMA_TODEVICE);
1739 last = skb_shinfo(skb)->nr_frags;
1741 for (i = 0; i < last; i++) {
1742 sw_cons = NEXT_TX_BD(sw_cons);
1744 pci_unmap_page(bp->pdev,
1746 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1748 skb_shinfo(skb)->frags[i].size,
1752 sw_cons = NEXT_TX_BD(sw_cons);
1754 tx_free_bd += last + 1;
1758 hw_cons = bp->hw_tx_cons =
1759 sblk->status_tx_quick_consumer_index0;
1761 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1766 bp->tx_cons = sw_cons;
1767 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1768 * before checking for netif_queue_stopped(). Without the
1769 * memory barrier, there is a small possibility that bnx2_start_xmit()
1770 * will miss it and cause the queue to be stopped forever.
1774 if (unlikely(netif_queue_stopped(bp->dev)) &&
1775 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1776 netif_tx_lock(bp->dev);
1777 if ((netif_queue_stopped(bp->dev)) &&
1778 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1779 netif_wake_queue(bp->dev);
1780 netif_tx_unlock(bp->dev);
1785 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1788 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1789 struct rx_bd *cons_bd, *prod_bd;
1791 cons_rx_buf = &bp->rx_buf_ring[cons];
1792 prod_rx_buf = &bp->rx_buf_ring[prod];
1794 pci_dma_sync_single_for_device(bp->pdev,
1795 pci_unmap_addr(cons_rx_buf, mapping),
1796 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1798 bp->rx_prod_bseq += bp->rx_buf_use_size;
1800 prod_rx_buf->skb = skb;
1805 pci_unmap_addr_set(prod_rx_buf, mapping,
1806 pci_unmap_addr(cons_rx_buf, mapping));
1808 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1809 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1810 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1811 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
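/* Main rx path: walk completions up to the hardware consumer index,
 * check each l2_fhdr for errors, copy small packets into a fresh skb
 * (recycling the old buffer) and pass large ones up whole after
 * replenishing the ring, then advance the rx producer mailboxes.
 */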
1815 bnx2_rx_int(struct bnx2 *bp, int budget)
1817 struct status_block *sblk = bp->status_blk;
1818 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1819 struct l2_fhdr *rx_hdr;
1822 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1823 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1826 sw_cons = bp->rx_cons;
1827 sw_prod = bp->rx_prod;
1829 /* Memory barrier necessary as speculative reads of the rx
1830 * buffer can be ahead of the index in the status block
1833 while (sw_cons != hw_cons) {
1836 struct sw_bd *rx_buf;
1837 struct sk_buff *skb;
1838 dma_addr_t dma_addr;
1840 sw_ring_cons = RX_RING_IDX(sw_cons);
1841 sw_ring_prod = RX_RING_IDX(sw_prod);
1843 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1848 dma_addr = pci_unmap_addr(rx_buf, mapping);
1850 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1851 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1853 rx_hdr = (struct l2_fhdr *) skb->data;
1854 len = rx_hdr->l2_fhdr_pkt_len - 4;
1856 if ((status = rx_hdr->l2_fhdr_status) &
1857 (L2_FHDR_ERRORS_BAD_CRC |
1858 L2_FHDR_ERRORS_PHY_DECODE |
1859 L2_FHDR_ERRORS_ALIGNMENT |
1860 L2_FHDR_ERRORS_TOO_SHORT |
1861 L2_FHDR_ERRORS_GIANT_FRAME)) {
1866 /* Since we don't have a jumbo ring, copy small packets
1869 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1870 struct sk_buff *new_skb;
1872 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1873 if (new_skb == NULL)
1877 memcpy(new_skb->data,
1878 skb->data + bp->rx_offset - 2,
1881 skb_reserve(new_skb, 2);
1882 skb_put(new_skb, len);
1884 bnx2_reuse_rx_skb(bp, skb,
1885 sw_ring_cons, sw_ring_prod);
1889 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1890 pci_unmap_single(bp->pdev, dma_addr,
1891 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1893 skb_reserve(skb, bp->rx_offset);
1898 bnx2_reuse_rx_skb(bp, skb,
1899 sw_ring_cons, sw_ring_prod);
1903 skb->protocol = eth_type_trans(skb, bp->dev);
1905 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1906 (ntohs(skb->protocol) != 0x8100)) {
1913 skb->ip_summed = CHECKSUM_NONE;
1915 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1916 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1918 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1919 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1920 skb->ip_summed = CHECKSUM_UNNECESSARY;
1924 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1925 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1926 rx_hdr->l2_fhdr_vlan_tag);
1930 netif_receive_skb(skb);
1932 bp->dev->last_rx = jiffies;
1936 sw_cons = NEXT_RX_BD(sw_cons);
1937 sw_prod = NEXT_RX_BD(sw_prod);
1939 		if (rx_pkt == budget)
1942 /* Refresh hw_cons to see if there is new work */
1943 if (sw_cons == hw_cons) {
1944 hw_cons = bp->hw_rx_cons =
1945 sblk->status_rx_quick_consumer_index0;
1946 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1951 bp->rx_cons = sw_cons;
1952 bp->rx_prod = sw_prod;
1954 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1956 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1964 /* MSI ISR - The only difference between this and the INTx ISR
1965 * is that the MSI interrupt is always serviced.
1968 bnx2_msi(int irq, void *dev_instance)
1970 struct net_device *dev = dev_instance;
1971 struct bnx2 *bp = netdev_priv(dev);
1973 prefetch(bp->status_blk);
1974 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1975 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1976 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1978 /* Return here if interrupt is disabled. */
1979 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1982 netif_rx_schedule(dev);
1988 bnx2_interrupt(int irq, void *dev_instance)
1990 struct net_device *dev = dev_instance;
1991 struct bnx2 *bp = netdev_priv(dev);
1993 /* When using INTx, it is possible for the interrupt to arrive
1994 * at the CPU before the status block posted prior to the
1995 * interrupt. Reading a register will flush the status block.
1996 * When using MSI, the MSI message will always complete after
1997 * the status block write.
1999 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2000 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2001 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2004 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2005 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2006 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2008 /* Return here if interrupt is shared and is disabled. */
2009 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2012 netif_rx_schedule(dev);
2018 bnx2_has_work(struct bnx2 *bp)
2020 struct status_block *sblk = bp->status_blk;
2022 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2023 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2026 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2034 bnx2_poll(struct net_device *dev, int *budget)
2036 struct bnx2 *bp = netdev_priv(dev);
2038 if ((bp->status_blk->status_attn_bits &
2039 STATUS_ATTN_BITS_LINK_STATE) !=
2040 (bp->status_blk->status_attn_bits_ack &
2041 STATUS_ATTN_BITS_LINK_STATE)) {
2043 spin_lock(&bp->phy_lock);
2045 spin_unlock(&bp->phy_lock);
2047 /* This is needed to take care of transient status
2048 * during link changes.
2050 REG_WR(bp, BNX2_HC_COMMAND,
2051 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2052 REG_RD(bp, BNX2_HC_COMMAND);
2055 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2058 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2059 int orig_budget = *budget;
2062 if (orig_budget > dev->quota)
2063 orig_budget = dev->quota;
2065 work_done = bnx2_rx_int(bp, orig_budget);
2066 *budget -= work_done;
2067 dev->quota -= work_done;
2070 bp->last_status_idx = bp->status_blk->status_idx;
2073 if (!bnx2_has_work(bp)) {
2074 netif_rx_complete(dev);
2075 if (likely(bp->flags & USING_MSI_FLAG)) {
2076 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2077 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2078 bp->last_status_idx);
2081 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2082 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2083 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2084 bp->last_status_idx);
2086 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2087 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2088 bp->last_status_idx);
2095 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2096 * from set_multicast.
2099 bnx2_set_rx_mode(struct net_device *dev)
2101 struct bnx2 *bp = netdev_priv(dev);
2102 u32 rx_mode, sort_mode;
2105 spin_lock_bh(&bp->phy_lock);
2107 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2108 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2109 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2111 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2112 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2114 if (!(bp->flags & ASF_ENABLE_FLAG))
2115 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2117 if (dev->flags & IFF_PROMISC) {
2118 /* Promiscuous mode. */
2119 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2120 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2121 BNX2_RPM_SORT_USER0_PROM_VLAN;
2123 else if (dev->flags & IFF_ALLMULTI) {
2124 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2125 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2128 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2131 /* Accept one or more multicast(s). */
2132 struct dev_mc_list *mclist;
2133 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2138 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2140 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2141 i++, mclist = mclist->next) {
2143 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2145 regidx = (bit & 0xe0) >> 5;
2147 mc_filter[regidx] |= (1 << bit);
2150 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2151 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2155 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2158 if (rx_mode != bp->rx_mode) {
2159 bp->rx_mode = rx_mode;
2160 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2163 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2164 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2165 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2167 spin_unlock_bh(&bp->phy_lock);
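/* The on-chip firmware images are stored gzip-compressed; the helpers
 * below set up a zlib stream plus a scratch buffer and inflate an
 * image into it.
 */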
2170 #define FW_BUF_SIZE 0x8000
2173 bnx2_gunzip_init(struct bnx2 *bp)
2175 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2178 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2181 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2182 if (bp->strm->workspace == NULL)
2192 vfree(bp->gunzip_buf);
2193 bp->gunzip_buf = NULL;
2196 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2197 "uncompression.\n", bp->dev->name);
2202 bnx2_gunzip_end(struct bnx2 *bp)
2204 kfree(bp->strm->workspace);
2209 if (bp->gunzip_buf) {
2210 vfree(bp->gunzip_buf);
2211 bp->gunzip_buf = NULL;
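/* Inflate one gzip'd image into the scratch buffer.  The gzip header
 * is validated and skipped by hand (including the optional FNAME
 * field) because zlib_inflate() runs in raw mode (-MAX_WBITS).
 */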
2216 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2220 /* check gzip header */
2221 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2227 if (zbuf[3] & FNAME)
2228 while ((zbuf[n++] != 0) && (n < len));
2230 bp->strm->next_in = zbuf + n;
2231 bp->strm->avail_in = len - n;
2232 bp->strm->next_out = bp->gunzip_buf;
2233 bp->strm->avail_out = FW_BUF_SIZE;
2235 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2239 rc = zlib_inflate(bp->strm, Z_FINISH);
2241 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2242 *outbuf = bp->gunzip_buf;
2244 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2245 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2246 bp->dev->name, bp->strm->msg);
2248 zlib_inflateEnd(bp->strm);
2250 if (rc == Z_STREAM_END)
2257 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2264 for (i = 0; i < rv2p_code_len; i += 8) {
2265 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2267 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2270 if (rv2p_proc == RV2P_PROC1) {
2271 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2272 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2275 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2276 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2280 /* Reset the processor, un-stall is done later. */
2281 if (rv2p_proc == RV2P_PROC1) {
2282 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2285 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
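/* Load firmware into one of the on-chip RISC CPUs: halt it, copy each
 * section (text, data, sbss, bss, rodata) into its scratchpad through
 * the indirect register interface, set the PC to the entry point, and
 * un-halt.  Only the text section is stored compressed.
 */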
2290 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2297 val = REG_RD_IND(bp, cpu_reg->mode);
2298 val |= cpu_reg->mode_value_halt;
2299 REG_WR_IND(bp, cpu_reg->mode, val);
2300 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2302 /* Load the Text area. */
2303 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2308 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2318 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2319 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2323 /* Load the Data area. */
2324 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2328 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2329 REG_WR_IND(bp, offset, fw->data[j]);
2333 /* Load the SBSS area. */
2334 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2338 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2339 REG_WR_IND(bp, offset, fw->sbss[j]);
2343 /* Load the BSS area. */
2344 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2348 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2349 REG_WR_IND(bp, offset, fw->bss[j]);
2353 /* Load the Read-Only area. */
2354 offset = cpu_reg->spad_base +
2355 (fw->rodata_addr - cpu_reg->mips_view_base);
2359 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2360 REG_WR_IND(bp, offset, fw->rodata[j]);
2364 /* Clear the pre-fetch instruction. */
2365 REG_WR_IND(bp, cpu_reg->inst, 0);
2366 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2368 /* Start the CPU. */
2369 val = REG_RD_IND(bp, cpu_reg->mode);
2370 val &= ~cpu_reg->mode_value_halt;
2371 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2372 REG_WR_IND(bp, cpu_reg->mode, val);
2378 bnx2_init_cpus(struct bnx2 *bp)
2380 struct cpu_reg cpu_reg;
2386 if ((rc = bnx2_gunzip_init(bp)) != 0)
2389 /* Initialize the RV2P processor. */
2390 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2395 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2397 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2402 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2404 /* Initialize the RX Processor. */
2405 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2406 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2407 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2408 cpu_reg.state = BNX2_RXP_CPU_STATE;
2409 cpu_reg.state_value_clear = 0xffffff;
2410 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2411 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2412 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2413 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2414 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2415 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2416 cpu_reg.mips_view_base = 0x8000000;
2418 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2419 fw = &bnx2_rxp_fw_09;
2421 fw = &bnx2_rxp_fw_06;
2423 rc = load_cpu_fw(bp, &cpu_reg, fw);
2427 /* Initialize the TX Processor. */
2428 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2429 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2430 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2431 cpu_reg.state = BNX2_TXP_CPU_STATE;
2432 cpu_reg.state_value_clear = 0xffffff;
2433 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2434 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2435 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2436 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2437 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2438 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2439 cpu_reg.mips_view_base = 0x8000000;
2441 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2442 fw = &bnx2_txp_fw_09;
2444 fw = &bnx2_txp_fw_06;
2446 rc = load_cpu_fw(bp, &cpu_reg, fw);
2450 /* Initialize the TX Patch-up Processor. */
2451 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2452 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2453 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2454 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2455 cpu_reg.state_value_clear = 0xffffff;
2456 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2457 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2458 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2459 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2460 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2461 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2462 cpu_reg.mips_view_base = 0x8000000;
2464 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2465 fw = &bnx2_tpat_fw_09;
2467 fw = &bnx2_tpat_fw_06;
2469 rc = load_cpu_fw(bp, &cpu_reg, fw);
2473 /* Initialize the Completion Processor. */
2474 cpu_reg.mode = BNX2_COM_CPU_MODE;
2475 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2476 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2477 cpu_reg.state = BNX2_COM_CPU_STATE;
2478 cpu_reg.state_value_clear = 0xffffff;
2479 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2480 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2481 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2482 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2483 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2484 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2485 cpu_reg.mips_view_base = 0x8000000;
2487 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2488 fw = &bnx2_com_fw_09;
2490 fw = &bnx2_com_fw_06;
2492 rc = load_cpu_fw(bp, &cpu_reg, fw);
2496 /* Initialize the Command Processor. */
2497 cpu_reg.mode = BNX2_CP_CPU_MODE;
2498 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2499 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2500 cpu_reg.state = BNX2_CP_CPU_STATE;
2501 cpu_reg.state_value_clear = 0xffffff;
2502 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2503 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2504 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2505 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2506 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2507 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2508 cpu_reg.mips_view_base = 0x8000000;
2510 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2511 fw = &bnx2_cp_fw_09;
2513 load_cpu_fw(bp, &cpu_reg, fw);
2518 bnx2_gunzip_end(bp);
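/* Power state handling.  When suspending with wake-on-LAN, the PHY is
 * restricted to 10/100 autoneg, the MAC is set up to recognize magic
 * and ACPI wakeup packets, all multicast is accepted, and the firmware
 * is told which suspend mode applies before PME is enabled in PMCSR.
 */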
2522 static int
2523 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2524 {
2525 u16 pmcsr;
2527 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2529 switch (state) {
2530 case PCI_D0: {
2531 u32 val;
2533 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2534 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2535 PCI_PM_CTRL_PME_STATUS);
2537 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2538 /* delay required during transition out of D3hot */
2539 msleep(20);
2541 val = REG_RD(bp, BNX2_EMAC_MODE);
2542 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2543 val &= ~BNX2_EMAC_MODE_MPKT;
2544 REG_WR(bp, BNX2_EMAC_MODE, val);
2546 val = REG_RD(bp, BNX2_RPM_CONFIG);
2547 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2548 REG_WR(bp, BNX2_RPM_CONFIG, val);
2549 break;
2550 }
2551 case PCI_D3hot: {
2552 int i;
2553 u32 val, wol_msg;
2555 if (bp->wol) {
2556 u32 advertising;
2557 u8 autoneg;
2559 autoneg = bp->autoneg;
2560 advertising = bp->advertising;
2562 bp->autoneg = AUTONEG_SPEED;
2563 bp->advertising = ADVERTISED_10baseT_Half |
2564 ADVERTISED_10baseT_Full |
2565 ADVERTISED_100baseT_Half |
2566 ADVERTISED_100baseT_Full |
2567 ADVERTISED_Autoneg;
2569 bnx2_setup_copper_phy(bp);
2571 bp->autoneg = autoneg;
2572 bp->advertising = advertising;
2574 bnx2_set_mac_addr(bp);
2576 val = REG_RD(bp, BNX2_EMAC_MODE);
2578 /* Enable port mode. */
2579 val &= ~BNX2_EMAC_MODE_PORT;
2580 val |= BNX2_EMAC_MODE_PORT_MII |
2581 BNX2_EMAC_MODE_MPKT_RCVD |
2582 BNX2_EMAC_MODE_ACPI_RCVD |
2583 BNX2_EMAC_MODE_MPKT;
2585 REG_WR(bp, BNX2_EMAC_MODE, val);
2587 /* receive all multicast */
2588 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2589 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2590 0xffffffff);
2591 }
2592 REG_WR(bp, BNX2_EMAC_RX_MODE,
2593 BNX2_EMAC_RX_MODE_SORT_MODE);
2595 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2596 BNX2_RPM_SORT_USER0_MC_EN;
2597 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2598 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2599 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2600 BNX2_RPM_SORT_USER0_ENA);
2602 /* Need to enable EMAC and RPM for WOL. */
2603 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2604 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2605 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2606 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2608 val = REG_RD(bp, BNX2_RPM_CONFIG);
2609 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2610 REG_WR(bp, BNX2_RPM_CONFIG, val);
2612 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2613 }
2614 else {
2615 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2616 }
2618 if (!(bp->flags & NO_WOL_FLAG))
2619 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2621 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2622 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2623 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2624 pmcsr |= 3;
2625 }
2626 else {
2627 pmcsr |= 3;
2632 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2633 }
2634 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2635 pmcsr);
2637 /* No more memory access after this point until
2638 * device is brought back to D0.
2639 */
2640 udelay(50);
2641 break;
2642 }
2643 default:
2644 return -EINVAL;
2645 }
2646 return 0;
2647 }
2649 static int
2650 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2651 {
2652 u32 val;
2653 int j;
2655 /* Request access to the flash interface. */
2656 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2657 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2658 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2659 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2660 break;
2662 udelay(5);
2663 }
2665 if (j >= NVRAM_TIMEOUT_COUNT)
2666 return -EBUSY;
2668 return 0;
2669 }
2671 static int
2672 bnx2_release_nvram_lock(struct bnx2 *bp)
2673 {
2674 int j;
2675 u32 val;
2677 /* Relinquish nvram interface. */
2678 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2680 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2681 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2682 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2683 break;
2685 udelay(5);
2686 }
2688 if (j >= NVRAM_TIMEOUT_COUNT)
2689 return -EBUSY;
2691 return 0;
2692 }
2695 static int
2696 bnx2_enable_nvram_write(struct bnx2 *bp)
2697 {
2698 u32 val;
2700 val = REG_RD(bp, BNX2_MISC_CFG);
2701 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2703 if (!bp->flash_info->buffered) {
2704 int j;
2706 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2707 REG_WR(bp, BNX2_NVM_COMMAND,
2708 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2710 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2711 udelay(5);
2713 val = REG_RD(bp, BNX2_NVM_COMMAND);
2714 if (val & BNX2_NVM_COMMAND_DONE)
2715 break;
2716 }
2718 if (j >= NVRAM_TIMEOUT_COUNT)
2719 return -EBUSY;
2720 }
2721 return 0;
2722 }
2724 static void
2725 bnx2_disable_nvram_write(struct bnx2 *bp)
2726 {
2727 u32 val;
2729 val = REG_RD(bp, BNX2_MISC_CFG);
2730 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2731 }
2734 static void
2735 bnx2_enable_nvram_access(struct bnx2 *bp)
2736 {
2737 u32 val;
2739 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2740 /* Enable both bits, even on read. */
2741 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2742 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2743 }
2745 static void
2746 bnx2_disable_nvram_access(struct bnx2 *bp)
2747 {
2748 u32 val;
2750 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2751 /* Disable both bits, even after read. */
2752 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2753 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2754 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2755 }
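/* Page-level flash operations follow. Non-buffered parts must be erased
 * one page at a time before being rewritten; buffered parts handle the
 * erase internally, so bnx2_nvram_erase_page() is a no-op for them.
 */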
2757 static int
2758 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2759 {
2760 u32 cmd;
2761 int j;
2763 if (bp->flash_info->buffered)
2764 /* Buffered flash, no erase needed */
2765 return 0;
2767 /* Build an erase command */
2768 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2769 BNX2_NVM_COMMAND_DOIT;
2771 /* Need to clear DONE bit separately. */
2772 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2774 /* Address of the NVRAM to read from. */
2775 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2777 /* Issue an erase command. */
2778 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2780 /* Wait for completion. */
2781 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2782 u32 val;
2784 udelay(5);
2786 val = REG_RD(bp, BNX2_NVM_COMMAND);
2787 if (val & BNX2_NVM_COMMAND_DONE)
2788 break;
2789 }
2791 if (j >= NVRAM_TIMEOUT_COUNT)
2792 return -EBUSY;
2794 return 0;
2795 }
2797 static int
2798 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2799 {
2800 u32 cmd;
2801 int j;
2803 /* Build the command word. */
2804 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2806 /* Calculate an offset of a buffered flash. */
2807 if (bp->flash_info->buffered) {
2808 offset = ((offset / bp->flash_info->page_size) <<
2809 bp->flash_info->page_bits) +
2810 (offset % bp->flash_info->page_size);
2811 }
2813 /* Need to clear DONE bit separately. */
2814 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2816 /* Address of the NVRAM to read from. */
2817 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2819 /* Issue a read command. */
2820 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2822 /* Wait for completion. */
2823 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2824 u32 val;
2826 udelay(5);
2828 val = REG_RD(bp, BNX2_NVM_COMMAND);
2829 if (val & BNX2_NVM_COMMAND_DONE) {
2830 val = REG_RD(bp, BNX2_NVM_READ);
2832 val = be32_to_cpu(val);
2833 memcpy(ret_val, &val, 4);
2834 break;
2835 }
2836 }
2837 if (j >= NVRAM_TIMEOUT_COUNT)
2838 return -EBUSY;
2840 return 0;
2841 }
2844 static int
2845 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2846 {
2847 u32 cmd, val32;
2848 int j;
2850 /* Build the command word. */
2851 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2853 /* Calculate an offset of a buffered flash. */
2854 if (bp->flash_info->buffered) {
2855 offset = ((offset / bp->flash_info->page_size) <<
2856 bp->flash_info->page_bits) +
2857 (offset % bp->flash_info->page_size);
2858 }
2860 /* Need to clear DONE bit separately. */
2861 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2863 memcpy(&val32, val, 4);
2864 val32 = cpu_to_be32(val32);
2866 /* Write the data. */
2867 REG_WR(bp, BNX2_NVM_WRITE, val32);
2869 /* Address of the NVRAM to write to. */
2870 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2872 /* Issue the write command. */
2873 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2875 /* Wait for completion. */
2876 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2877 udelay(5);
2879 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2880 break;
2881 }
2882 if (j >= NVRAM_TIMEOUT_COUNT)
2883 return -EBUSY;
2885 return 0;
2886 }
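/* bnx2_init_nvram() identifies the attached flash/EEPROM by matching the
 * strapping bits latched in NVM_CFG1 against flash_table[], and programs
 * the NVM_CFG registers if the firmware has not already reconfigured the
 * interface.
 */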
2888 static int
2889 bnx2_init_nvram(struct bnx2 *bp)
2890 {
2891 u32 val;
2892 int j, entry_count, rc;
2893 struct flash_spec *flash;
2895 /* Determine the selected interface. */
2896 val = REG_RD(bp, BNX2_NVM_CFG1);
2898 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2900 rc = 0;
2901 if (val & 0x40000000) {
2903 /* Flash interface has been reconfigured */
2904 for (j = 0, flash = &flash_table[0]; j < entry_count;
2905 j++, flash++) {
2906 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2907 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2908 bp->flash_info = flash;
2909 break;
2910 }
2911 }
2912 }
2913 else {
2914 u32 mask;
2915 /* Not yet been reconfigured */
2917 if (val & (1 << 23))
2918 mask = FLASH_BACKUP_STRAP_MASK;
2919 else
2920 mask = FLASH_STRAP_MASK;
2922 for (j = 0, flash = &flash_table[0]; j < entry_count;
2923 j++, flash++) {
2925 if ((val & mask) == (flash->strapping & mask)) {
2926 bp->flash_info = flash;
2928 /* Request access to the flash interface. */
2929 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2932 /* Enable access to flash interface */
2933 bnx2_enable_nvram_access(bp);
2935 /* Reconfigure the flash interface */
2936 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2937 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2938 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2939 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2941 /* Disable access to flash interface */
2942 bnx2_disable_nvram_access(bp);
2943 bnx2_release_nvram_lock(bp);
2944 break;
2945 }
2946 }
2948 } /* if (val & 0x40000000) */
2950 if (j == entry_count) {
2951 bp->flash_info = NULL;
2952 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2953 return -ENODEV;
2954 }
2956 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2957 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2958 if (val)
2959 bp->flash_size = val;
2960 else
2961 bp->flash_size = bp->flash_info->total_size;
2963 return rc;
2964 }
2966 static int
2967 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2968 int buf_size)
2969 {
2970 int rc = 0;
2971 u32 cmd_flags, offset32, len32, extra;
2973 if (buf_size == 0)
2974 return 0;
2976 /* Request access to the flash interface. */
2977 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2978 return rc;
2980 /* Enable access to flash interface */
2981 bnx2_enable_nvram_access(bp);
2983 len32 = buf_size;
2984 offset32 = offset;
2985 extra = 0;
2987 cmd_flags = 0;
2989 if (offset32 & 3) {
2990 u8 buf[4];
2991 u32 pre_len;
2993 offset32 &= ~3;
2994 pre_len = 4 - (offset & 3);
2996 if (pre_len >= len32) {
2997 pre_len = len32;
2998 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2999 BNX2_NVM_COMMAND_LAST;
3000 }
3001 else {
3002 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3003 }
3005 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3007 if (rc)
3008 return rc;
3010 memcpy(ret_buf, buf + (offset & 3), pre_len);
3012 offset32 += 4;
3013 ret_buf += pre_len;
3014 len32 -= pre_len;
3015 }
3016 if (len32 & 3) {
3017 extra = 4 - (len32 & 3);
3018 len32 = (len32 + 4) & ~3;
3019 }
3021 if (len32 == 4) {
3022 u8 buf[4];
3024 if (cmd_flags)
3025 cmd_flags = BNX2_NVM_COMMAND_LAST;
3026 else
3027 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3028 BNX2_NVM_COMMAND_LAST;
3030 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3032 memcpy(ret_buf, buf, 4 - extra);
3033 }
3034 else if (len32 > 0) {
3035 u8 buf[4];
3037 /* Read the first word. */
3038 if (cmd_flags)
3039 cmd_flags = 0;
3040 else
3041 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3043 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3045 /* Advance to the next dword. */
3046 offset32 += 4;
3047 ret_buf += 4;
3048 len32 -= 4;
3050 while (len32 > 4 && rc == 0) {
3051 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3053 /* Advance to the next dword. */
3054 offset32 += 4;
3055 ret_buf += 4;
3056 len32 -= 4;
3057 }
3059 if (rc)
3060 return rc;
3062 cmd_flags = BNX2_NVM_COMMAND_LAST;
3063 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3065 memcpy(ret_buf, buf, 4 - extra);
3066 }
3068 /* Disable access to flash interface */
3069 bnx2_disable_nvram_access(bp);
3071 bnx2_release_nvram_lock(bp);
3073 return rc;
3074 }
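/* bnx2_nvram_write() widens the request to dword alignment: partial
 * dwords at either end are read back first, merged with the caller's
 * data in a temporary buffer, and the result is written out page by
 * page (read-modify-write for non-buffered flash).
 */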
3076 static int
3077 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3078 int buf_size)
3079 {
3080 u32 written, offset32, len32;
3081 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3082 int rc = 0;
3083 int align_start, align_end;
3085 buf = data_buf;
3086 offset32 = offset;
3087 len32 = buf_size;
3088 align_start = align_end = 0;
3090 if ((align_start = (offset32 & 3))) {
3091 offset32 &= ~3;
3092 len32 += align_start;
3093 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3094 return rc;
3095 }
3097 if (len32 & 3) {
3098 if ((len32 > 4) || !align_start) {
3099 align_end = 4 - (len32 & 3);
3100 len32 += align_end;
3101 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3102 end, 4)))
3103 return rc;
3104 }
3105 }
3108 if (align_start || align_end) {
3109 buf = kmalloc(len32, GFP_KERNEL);
3110 if (buf == NULL)
3111 return -ENOMEM;
3112 if (align_start)
3113 memcpy(buf, start, 4);
3115 if (align_end)
3116 memcpy(buf + len32 - 4, end, 4);
3118 memcpy(buf + align_start, data_buf, buf_size);
3119 }
3121 if (bp->flash_info->buffered == 0) {
3122 flash_buffer = kmalloc(264, GFP_KERNEL);
3123 if (flash_buffer == NULL) {
3124 rc = -ENOMEM;
3125 goto nvram_write_end;
3126 }
3127 }
3129 written = 0;
3130 while ((written < len32) && (rc == 0)) {
3131 u32 page_start, page_end, data_start, data_end;
3132 u32 addr, cmd_flags;
3135 /* Find the page_start addr */
3136 page_start = offset32 + written;
3137 page_start -= (page_start % bp->flash_info->page_size);
3138 /* Find the page_end addr */
3139 page_end = page_start + bp->flash_info->page_size;
3140 /* Find the data_start addr */
3141 data_start = (written == 0) ? offset32 : page_start;
3142 /* Find the data_end addr */
3143 data_end = (page_end > offset32 + len32) ?
3144 (offset32 + len32) : page_end;
3146 /* Request access to the flash interface. */
3147 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3148 goto nvram_write_end;
3150 /* Enable access to flash interface */
3151 bnx2_enable_nvram_access(bp);
3153 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3154 if (bp->flash_info->buffered == 0) {
3155 int j;
3157 /* Read the whole page into the buffer
3158 * (non-buffer flash only) */
3159 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3160 if (j == (bp->flash_info->page_size - 4)) {
3161 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3162 }
3163 rc = bnx2_nvram_read_dword(bp,
3164 page_start + j,
3165 &flash_buffer[j],
3166 cmd_flags);
3168 if (rc)
3169 goto nvram_write_end;
3171 cmd_flags = 0;
3172 }
3173 }
3175 /* Enable writes to flash interface (unlock write-protect) */
3176 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3177 goto nvram_write_end;
3179 /* Erase the page */
3180 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3181 goto nvram_write_end;
3183 /* Re-enable the write again for the actual write */
3184 bnx2_enable_nvram_write(bp);
3186 /* Loop to write back the buffer data from page_start to
3187 * data_start */
3188 i = 0;
3189 if (bp->flash_info->buffered == 0) {
3190 for (addr = page_start; addr < data_start;
3191 addr += 4, i += 4) {
3193 rc = bnx2_nvram_write_dword(bp, addr,
3194 &flash_buffer[i], cmd_flags);
3196 if (rc != 0)
3197 goto nvram_write_end;
3199 cmd_flags = 0;
3200 }
3201 }
3203 /* Loop to write the new data from data_start to data_end */
3204 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3205 if ((addr == page_end - 4) ||
3206 ((bp->flash_info->buffered) &&
3207 (addr == data_end - 4))) {
3209 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3210 }
3211 rc = bnx2_nvram_write_dword(bp, addr, buf,
3212 cmd_flags);
3214 if (rc != 0)
3215 goto nvram_write_end;
3217 cmd_flags = 0;
3218 buf += 4;
3219 }
3221 /* Loop to write back the buffer data from data_end
3222 * to page_end */
3223 if (bp->flash_info->buffered == 0) {
3224 for (addr = data_end; addr < page_end;
3225 addr += 4, i += 4) {
3227 if (addr == page_end - 4) {
3228 cmd_flags = BNX2_NVM_COMMAND_LAST;
3229 }
3230 rc = bnx2_nvram_write_dword(bp, addr,
3231 &flash_buffer[i], cmd_flags);
3233 if (rc != 0)
3234 goto nvram_write_end;
3236 cmd_flags = 0;
3237 }
3238 }
3240 /* Disable writes to flash interface (lock write-protect) */
3241 bnx2_disable_nvram_write(bp);
3243 /* Disable access to flash interface */
3244 bnx2_disable_nvram_access(bp);
3245 bnx2_release_nvram_lock(bp);
3247 /* Increment written */
3248 written += data_end - data_start;
3249 }
3251 nvram_write_end:
3252 if (bp->flash_info->buffered == 0)
3253 kfree(flash_buffer);
3255 if (align_start || align_end)
3256 kfree(buf);
3257 return rc;
3258 }
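/* Chip reset path: quiesce DMA, handshake with the bootcode through the
 * shared-memory mailbox, toggle the core reset, and wait for both the
 * reset bits and the firmware's completion ack before continuing.
 */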
3261 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3266 /* Wait for the current PCI transaction to complete before
3267 * issuing a reset. */
3268 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3269 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3270 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3271 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3272 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3273 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3276 /* Wait for the firmware to tell us it is ok to issue a reset. */
3277 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3279 /* Deposit a driver reset signature so the firmware knows that
3280 * this is a soft reset. */
3281 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3282 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3284 /* Do a dummy read to force the chip to complete all current transactions
3285 * before we issue a reset. */
3286 val = REG_RD(bp, BNX2_MISC_ID);
3288 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3289 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3290 REG_RD(bp, BNX2_MISC_COMMAND);
3291 udelay(5);
3293 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3294 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3296 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3298 } else {
3299 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3300 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3301 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3303 /* Chip reset. */
3304 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3306 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3307 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3308 current->state = TASK_UNINTERRUPTIBLE;
3309 schedule_timeout(HZ / 50);
3310 }
3312 /* Reset takes approximately 30 usec */
3313 for (i = 0; i < 10; i++) {
3314 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3315 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3316 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3317 break;
3318 udelay(10);
3319 }
3321 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3322 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3323 printk(KERN_ERR PFX "Chip reset did not complete\n");
3324 return -EBUSY;
3325 }
3326 }
3328 /* Make sure byte swapping is properly configured. */
3329 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3330 if (val != 0x01020304) {
3331 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3332 return -ENODEV;
3333 }
3335 /* Wait for the firmware to finish its initialization. */
3336 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3337 if (rc)
3338 return rc;
3340 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3341 /* Adjust the voltage regulator two steps lower. The default
3342 * of this register is 0x0000000e. */
3343 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3345 /* Remove bad rbuf memory from the free pool. */
3346 rc = bnx2_alloc_bad_rbuf(bp);
3347 }
3349 return rc;
3350 }
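/* bnx2_init_chip() brings the freshly reset chip to an operational state:
 * DMA byte ordering, on-chip CPU firmware, context memory, MTU, and the
 * host coalescing and status/statistics block setup all happen here.
 */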
3352 static int
3353 bnx2_init_chip(struct bnx2 *bp)
3354 {
3355 u32 val;
3356 int rc;
3358 /* Make sure the interrupt is not active. */
3359 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3361 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3362 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3363 #ifdef __BIG_ENDIAN
3364 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3365 #endif
3366 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3367 DMA_READ_CHANS << 12 |
3368 DMA_WRITE_CHANS << 16;
3370 val |= (0x2 << 20) | (1 << 11);
3372 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3373 val |= (1 << 23);
3375 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3376 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3377 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3379 REG_WR(bp, BNX2_DMA_CONFIG, val);
3381 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3382 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3383 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3384 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3387 if (bp->flags & PCIX_FLAG) {
3388 u16 val16;
3390 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3391 &val16);
3392 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3393 val16 & ~PCI_X_CMD_ERO);
3394 }
3396 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3397 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3398 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3399 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3401 /* Initialize context mapping and zero out the quick contexts. The
3402 * context block must have already been enabled. */
3403 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3404 bnx2_init_5709_context(bp);
3405 else
3406 bnx2_init_context(bp);
3408 if ((rc = bnx2_init_cpus(bp)) != 0)
3409 return rc;
3411 bnx2_init_nvram(bp);
3413 bnx2_set_mac_addr(bp);
3415 val = REG_RD(bp, BNX2_MQ_CONFIG);
3416 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3417 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3418 REG_WR(bp, BNX2_MQ_CONFIG, val);
3420 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3421 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3422 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3424 val = (BCM_PAGE_BITS - 8) << 24;
3425 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3427 /* Configure page size. */
3428 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3429 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3430 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3431 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3433 val = bp->mac_addr[0] +
3434 (bp->mac_addr[1] << 8) +
3435 (bp->mac_addr[2] << 16) +
3436 bp->mac_addr[3] +
3437 (bp->mac_addr[4] << 8) +
3438 (bp->mac_addr[5] << 16);
3439 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3441 /* Program the MTU. Also include 4 bytes for CRC32. */
3442 val = bp->dev->mtu + ETH_HLEN + 4;
3443 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3444 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3445 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3447 bp->last_status_idx = 0;
3448 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3450 /* Set up how to generate a link change interrupt. */
3451 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3453 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3454 (u64) bp->status_blk_mapping & 0xffffffff);
3455 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3457 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3458 (u64) bp->stats_blk_mapping & 0xffffffff);
3459 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3460 (u64) bp->stats_blk_mapping >> 32);
3462 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3463 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3465 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3466 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3468 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3469 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3471 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3473 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3475 REG_WR(bp, BNX2_HC_COM_TICKS,
3476 (bp->com_ticks_int << 16) | bp->com_ticks);
3478 REG_WR(bp, BNX2_HC_CMD_TICKS,
3479 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3481 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3482 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3484 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3485 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3486 else {
3487 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3488 BNX2_HC_CONFIG_TX_TMR_MODE |
3489 BNX2_HC_CONFIG_COLLECT_STATS);
3490 }
3492 /* Clear internal stats counters. */
3493 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3495 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3497 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3498 BNX2_PORT_FEATURE_ASF_ENABLED)
3499 bp->flags |= ASF_ENABLE_FLAG;
3501 /* Initialize the receive filter. */
3502 bnx2_set_rx_mode(bp->dev);
3504 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3505 0);
3507 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3508 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3510 udelay(20);
3512 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3514 return rc;
3515 }
3518 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3520 u32 val, offset0, offset1, offset2, offset3;
3522 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3523 offset0 = BNX2_L2CTX_TYPE_XI;
3524 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3525 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3526 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3528 offset0 = BNX2_L2CTX_TYPE;
3529 offset1 = BNX2_L2CTX_CMD_TYPE;
3530 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3531 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3533 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3534 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3536 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3537 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3539 val = (u64) bp->tx_desc_mapping >> 32;
3540 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3542 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3543 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3544 }
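/* The TX ring is a circular chain: the final BD in the ring page is not
 * a packet descriptor but a pointer back to the ring's own base address,
 * which is what the MAX_TX_DESC_CNT entry below is initialized to hold.
 */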
3546 static void
3547 bnx2_init_tx_ring(struct bnx2 *bp)
3548 {
3549 struct tx_bd *txbd;
3550 u32 cid;
3552 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3554 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3556 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3557 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3558 bp->tx_prod = 0;
3559 bp->tx_cons = 0;
3560 bp->hw_tx_cons = 0;
3562 bp->tx_prod_bseq = 0;
3564 cid = TX_CID;
3565 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3566 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3568 bnx2_init_tx_context(bp, cid);
3571 static void
3572 bnx2_init_rx_ring(struct bnx2 *bp)
3573 {
3574 struct rx_bd *rxbd;
3575 int i;
3576 u16 prod, ring_prod;
3577 u32 val;
3579 /* 8 for CRC and VLAN */
3580 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3582 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3584 ring_prod = prod = bp->rx_prod = 0;
3587 bp->rx_prod_bseq = 0;
3589 for (i = 0; i < bp->rx_max_ring; i++) {
3590 int j;
3592 rxbd = &bp->rx_desc_ring[i][0];
3593 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3594 rxbd->rx_bd_len = bp->rx_buf_use_size;
3595 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3596 }
3597 if (i == (bp->rx_max_ring - 1))
3598 j = 0;
3599 else
3600 j = i + 1;
3601 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3602 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3603 0xffffffff;
3604 }
3606 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3607 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3609 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3611 val = (u64) bp->rx_desc_mapping[0] >> 32;
3612 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3614 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3615 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3617 for (i = 0; i < bp->rx_ring_size; i++) {
3618 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3619 break;
3620 }
3621 prod = NEXT_RX_BD(prod);
3622 ring_prod = RX_RING_IDX(prod);
3623 }
3624 bp->rx_prod = prod;
3626 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3628 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3632 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3636 bp->rx_ring_size = size;
3637 num_rings = 1;
3638 while (size > MAX_RX_DESC_CNT) {
3639 size -= MAX_RX_DESC_CNT;
3640 num_rings++;
3641 }
3642 /* round to next power of 2 */
3643 max = MAX_RX_RINGS;
3644 while ((max & num_rings) == 0)
3645 max >>= 1;
3647 if (num_rings != max)
3648 max <<= 1;
3650 bp->rx_max_ring = max;
3651 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3652 }
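/* Worked example (assuming 4K BD pages, so MAX_RX_DESC_CNT == 255): a
 * requested ring size of 600 needs three BD pages (600 = 255 + 255 + 90),
 * and num_rings is then rounded up to the next power of two, so four RX
 * BD pages are used.
 */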
3655 bnx2_free_tx_skbs(struct bnx2 *bp)
3659 if (bp->tx_buf_ring == NULL)
3662 for (i = 0; i < TX_DESC_CNT; ) {
3663 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3664 struct sk_buff *skb = tx_buf->skb;
3672 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3673 skb_headlen(skb), PCI_DMA_TODEVICE);
3677 last = skb_shinfo(skb)->nr_frags;
3678 for (j = 0; j < last; j++) {
3679 tx_buf = &bp->tx_buf_ring[i + j + 1];
3680 pci_unmap_page(bp->pdev,
3681 pci_unmap_addr(tx_buf, mapping),
3682 skb_shinfo(skb)->frags[j].size,
3692 bnx2_free_rx_skbs(struct bnx2 *bp)
3696 if (bp->rx_buf_ring == NULL)
3699 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3700 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3701 struct sk_buff *skb = rx_buf->skb;
3706 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3707 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3716 bnx2_free_skbs(struct bnx2 *bp)
3718 bnx2_free_tx_skbs(bp);
3719 bnx2_free_rx_skbs(bp);
3723 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3727 rc = bnx2_reset_chip(bp, reset_code);
3728 bnx2_free_skbs(bp);
3729 if (rc)
3730 return rc;
3732 if ((rc = bnx2_init_chip(bp)) != 0)
3733 return rc;
3735 bnx2_init_tx_ring(bp);
3736 bnx2_init_rx_ring(bp);
3737 return 0;
3738 }
3741 bnx2_init_nic(struct bnx2 *bp)
3745 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3746 return rc;
3748 spin_lock_bh(&bp->phy_lock);
3749 bnx2_init_phy(bp);
3750 spin_unlock_bh(&bp->phy_lock);
3751 bnx2_set_link(bp);
3752 return 0;
3753 }
3756 bnx2_test_registers(struct bnx2 *bp)
3760 static const struct {
3766 { 0x006c, 0, 0x00000000, 0x0000003f },
3767 { 0x0090, 0, 0xffffffff, 0x00000000 },
3768 { 0x0094, 0, 0x00000000, 0x00000000 },
3770 { 0x0404, 0, 0x00003f00, 0x00000000 },
3771 { 0x0418, 0, 0x00000000, 0xffffffff },
3772 { 0x041c, 0, 0x00000000, 0xffffffff },
3773 { 0x0420, 0, 0x00000000, 0x80ffffff },
3774 { 0x0424, 0, 0x00000000, 0x00000000 },
3775 { 0x0428, 0, 0x00000000, 0x00000001 },
3776 { 0x0450, 0, 0x00000000, 0x0000ffff },
3777 { 0x0454, 0, 0x00000000, 0xffffffff },
3778 { 0x0458, 0, 0x00000000, 0xffffffff },
3780 { 0x0808, 0, 0x00000000, 0xffffffff },
3781 { 0x0854, 0, 0x00000000, 0xffffffff },
3782 { 0x0868, 0, 0x00000000, 0x77777777 },
3783 { 0x086c, 0, 0x00000000, 0x77777777 },
3784 { 0x0870, 0, 0x00000000, 0x77777777 },
3785 { 0x0874, 0, 0x00000000, 0x77777777 },
3787 { 0x0c00, 0, 0x00000000, 0x00000001 },
3788 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3789 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3791 { 0x1000, 0, 0x00000000, 0x00000001 },
3792 { 0x1004, 0, 0x00000000, 0x000f0001 },
3794 { 0x1408, 0, 0x01c00800, 0x00000000 },
3795 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3796 { 0x14a8, 0, 0x00000000, 0x000001ff },
3797 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3798 { 0x14b0, 0, 0x00000002, 0x00000001 },
3799 { 0x14b8, 0, 0x00000000, 0x00000000 },
3800 { 0x14c0, 0, 0x00000000, 0x00000009 },
3801 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3802 { 0x14cc, 0, 0x00000000, 0x00000001 },
3803 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3805 { 0x1800, 0, 0x00000000, 0x00000001 },
3806 { 0x1804, 0, 0x00000000, 0x00000003 },
3808 { 0x2800, 0, 0x00000000, 0x00000001 },
3809 { 0x2804, 0, 0x00000000, 0x00003f01 },
3810 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3811 { 0x2810, 0, 0xffff0000, 0x00000000 },
3812 { 0x2814, 0, 0xffff0000, 0x00000000 },
3813 { 0x2818, 0, 0xffff0000, 0x00000000 },
3814 { 0x281c, 0, 0xffff0000, 0x00000000 },
3815 { 0x2834, 0, 0xffffffff, 0x00000000 },
3816 { 0x2840, 0, 0x00000000, 0xffffffff },
3817 { 0x2844, 0, 0x00000000, 0xffffffff },
3818 { 0x2848, 0, 0xffffffff, 0x00000000 },
3819 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3821 { 0x2c00, 0, 0x00000000, 0x00000011 },
3822 { 0x2c04, 0, 0x00000000, 0x00030007 },
3824 { 0x3c00, 0, 0x00000000, 0x00000001 },
3825 { 0x3c04, 0, 0x00000000, 0x00070000 },
3826 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3827 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3828 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3829 { 0x3c14, 0, 0x00000000, 0xffffffff },
3830 { 0x3c18, 0, 0x00000000, 0xffffffff },
3831 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3832 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3834 { 0x5004, 0, 0x00000000, 0x0000007f },
3835 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3836 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3838 { 0x5c00, 0, 0x00000000, 0x00000001 },
3839 { 0x5c04, 0, 0x00000000, 0x0003000f },
3840 { 0x5c08, 0, 0x00000003, 0x00000000 },
3841 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3842 { 0x5c10, 0, 0x00000000, 0xffffffff },
3843 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3844 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3845 { 0x5c88, 0, 0x00000000, 0x00077373 },
3846 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3848 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3849 { 0x680c, 0, 0xffffffff, 0x00000000 },
3850 { 0x6810, 0, 0xffffffff, 0x00000000 },
3851 { 0x6814, 0, 0xffffffff, 0x00000000 },
3852 { 0x6818, 0, 0xffffffff, 0x00000000 },
3853 { 0x681c, 0, 0xffffffff, 0x00000000 },
3854 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3855 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3856 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3857 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3858 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3859 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3860 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3861 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3862 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3863 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3864 { 0x684c, 0, 0xffffffff, 0x00000000 },
3865 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3866 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3867 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3868 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3869 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3870 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3872 { 0xffff, 0, 0x00000000, 0x00000000 },
3876 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3877 u32 offset, rw_mask, ro_mask, save_val, val;
3879 offset = (u32) reg_tbl[i].offset;
3880 rw_mask = reg_tbl[i].rw_mask;
3881 ro_mask = reg_tbl[i].ro_mask;
3883 save_val = readl(bp->regview + offset);
3885 writel(0, bp->regview + offset);
3887 val = readl(bp->regview + offset);
3888 if ((val & rw_mask) != 0) {
3889 goto reg_test_err;
3890 }
3892 if ((val & ro_mask) != (save_val & ro_mask)) {
3893 goto reg_test_err;
3894 }
3896 writel(0xffffffff, bp->regview + offset);
3898 val = readl(bp->regview + offset);
3899 if ((val & rw_mask) != rw_mask) {
3900 goto reg_test_err;
3901 }
3903 if ((val & ro_mask) != (save_val & ro_mask)) {
3904 goto reg_test_err;
3905 }
3907 writel(save_val, bp->regview + offset);
3908 continue;
3910 reg_test_err:
3911 writel(save_val, bp->regview + offset);
3912 ret = -ENODEV;
3913 break;
3914 }
3916 return ret;
3917 }
3919 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3921 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3922 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3925 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3928 for (offset = 0; offset < size; offset += 4) {
3930 REG_WR_IND(bp, start + offset, test_pattern[i]);
3932 if (REG_RD_IND(bp, start + offset) !=
3933 test_pattern[i]) {
3934 return -ENODEV;
3935 }
3936 }
3937 }
3938 return 0;
3939 }
3942 bnx2_test_memory(struct bnx2 *bp)
3946 static const struct {
3950 { 0x60000, 0x4000 },
3951 { 0xa0000, 0x3000 },
3952 { 0xe0000, 0x4000 },
3953 { 0x120000, 0x4000 },
3954 { 0x1a0000, 0x4000 },
3955 { 0x160000, 0x4000 },
3956 { 0xffffffff, 0 },
3957 };
3959 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3960 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3961 mem_tbl[i].len)) != 0) {
3962 break;
3963 }
3964 }
3966 return ret;
3967 }
3969 #define BNX2_MAC_LOOPBACK 0
3970 #define BNX2_PHY_LOOPBACK 1
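/* The loopback self-test builds a raw frame addressed to the NIC's own
 * MAC, sends it with the MAC or PHY looped back on itself, and verifies
 * the received l2_fhdr status, packet length, and payload byte pattern.
 */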
3973 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3975 unsigned int pkt_size, num_pkts, i;
3976 struct sk_buff *skb, *rx_skb;
3977 unsigned char *packet;
3978 u16 rx_start_idx, rx_idx;
3981 struct sw_bd *rx_buf;
3982 struct l2_fhdr *rx_hdr;
3985 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3986 bp->loopback = MAC_LOOPBACK;
3987 bnx2_set_mac_loopback(bp);
3989 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3990 bp->loopback = PHY_LOOPBACK;
3991 bnx2_set_phy_loopback(bp);
3992 }
3993 else
3994 return -EINVAL;
3996 pkt_size = 1514;
3997 skb = netdev_alloc_skb(bp->dev, pkt_size);
3998 if (!skb)
3999 return -ENOMEM;
4000 packet = skb_put(skb, pkt_size);
4001 memcpy(packet, bp->mac_addr, 6);
4002 memset(packet + 6, 0x0, 8);
4003 for (i = 14; i < pkt_size; i++)
4004 packet[i] = (unsigned char) (i & 0xff);
4006 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4007 PCI_DMA_TODEVICE);
4009 REG_WR(bp, BNX2_HC_COMMAND,
4010 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4012 REG_RD(bp, BNX2_HC_COMMAND);
4014 udelay(5);
4015 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4017 num_pkts = 0;
4019 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4021 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4022 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4023 txbd->tx_bd_mss_nbytes = pkt_size;
4024 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4026 num_pkts++;
4027 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4028 bp->tx_prod_bseq += pkt_size;
4030 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4031 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4033 udelay(100);
4035 REG_WR(bp, BNX2_HC_COMMAND,
4036 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4038 REG_RD(bp, BNX2_HC_COMMAND);
4040 udelay(5);
4042 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4043 dev_kfree_skb(skb);
4045 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4046 goto loopback_test_done;
4047 }
4049 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4050 if (rx_idx != rx_start_idx + num_pkts) {
4051 goto loopback_test_done;
4052 }
4054 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4055 rx_skb = rx_buf->skb;
4057 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4058 skb_reserve(rx_skb, bp->rx_offset);
4060 pci_dma_sync_single_for_cpu(bp->pdev,
4061 pci_unmap_addr(rx_buf, mapping),
4062 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4064 if (rx_hdr->l2_fhdr_status &
4065 (L2_FHDR_ERRORS_BAD_CRC |
4066 L2_FHDR_ERRORS_PHY_DECODE |
4067 L2_FHDR_ERRORS_ALIGNMENT |
4068 L2_FHDR_ERRORS_TOO_SHORT |
4069 L2_FHDR_ERRORS_GIANT_FRAME)) {
4071 goto loopback_test_done;
4072 }
4074 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4075 goto loopback_test_done;
4076 }
4078 for (i = 14; i < pkt_size; i++) {
4079 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4080 goto loopback_test_done;
4081 }
4082 }
4084 ret = 0;
4086 loopback_test_done:
4087 bp->loopback = 0;
4088 return ret;
4089 }
4091 #define BNX2_MAC_LOOPBACK_FAILED 1
4092 #define BNX2_PHY_LOOPBACK_FAILED 2
4093 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4094 BNX2_PHY_LOOPBACK_FAILED)
4097 bnx2_test_loopback(struct bnx2 *bp)
4101 if (!netif_running(bp->dev))
4102 return BNX2_LOOPBACK_FAILED;
4104 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4105 spin_lock_bh(&bp->phy_lock);
4106 bnx2_init_phy(bp);
4107 spin_unlock_bh(&bp->phy_lock);
4108 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4109 rc |= BNX2_MAC_LOOPBACK_FAILED;
4110 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4111 rc |= BNX2_PHY_LOOPBACK_FAILED;
4112 return rc;
4113 }
4115 #define NVRAM_SIZE 0x200
4116 #define CRC32_RESIDUAL 0xdebb20e3
4119 bnx2_test_nvram(struct bnx2 *bp)
4121 u32 buf[NVRAM_SIZE / 4];
4122 u8 *data = (u8 *) buf;
4126 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4127 goto test_nvram_done;
4129 magic = be32_to_cpu(buf[0]);
4130 if (magic != 0x669955aa) {
4131 rc = -ENODEV;
4132 goto test_nvram_done;
4133 }
4135 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4136 goto test_nvram_done;
4138 csum = ether_crc_le(0x100, data);
4139 if (csum != CRC32_RESIDUAL) {
4140 rc = -ENODEV;
4141 goto test_nvram_done;
4142 }
4144 csum = ether_crc_le(0x100, data + 0x100);
4145 if (csum != CRC32_RESIDUAL) {
4146 rc = -ENODEV;
4147 }
4149 test_nvram_done:
4150 return rc;
4151 }
4154 bnx2_test_link(struct bnx2 *bp)
4158 spin_lock_bh(&bp->phy_lock);
4159 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4160 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4161 spin_unlock_bh(&bp->phy_lock);
4163 if (bmsr & BMSR_LSTATUS) {
4164 return 0;
4165 }
4166 return -ENODEV;
4167 }
4170 bnx2_test_intr(struct bnx2 *bp)
4175 if (!netif_running(bp->dev))
4176 return -ENODEV;
4178 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4180 /* This register is not touched during run-time. */
4181 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4182 REG_RD(bp, BNX2_HC_COMMAND);
4184 for (i = 0; i < 10; i++) {
4185 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4186 status_idx) {
4188 break;
4189 }
4191 msleep_interruptible(10);
4192 }
4193 if (i < 10)
4194 return 0;
4196 return -ENODEV;
4197 }
4200 bnx2_5706_serdes_timer(struct bnx2 *bp)
4202 spin_lock(&bp->phy_lock);
4203 if (bp->serdes_an_pending)
4204 bp->serdes_an_pending--;
4205 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4206 u32 bmcr;
4208 bp->current_interval = bp->timer_interval;
4210 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4212 if (bmcr & BMCR_ANENABLE) {
4213 u32 phy1, phy2;
4215 bnx2_write_phy(bp, 0x1c, 0x7c00);
4216 bnx2_read_phy(bp, 0x1c, &phy1);
4218 bnx2_write_phy(bp, 0x17, 0x0f01);
4219 bnx2_read_phy(bp, 0x15, &phy2);
4220 bnx2_write_phy(bp, 0x17, 0x0f01);
4221 bnx2_read_phy(bp, 0x15, &phy2);
4223 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4224 !(phy2 & 0x20)) { /* no CONFIG */
4226 bmcr &= ~BMCR_ANENABLE;
4227 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4228 bnx2_write_phy(bp, MII_BMCR, bmcr);
4229 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4230 }
4231 }
4232 }
4233 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4234 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4235 u32 phy2;
4237 bnx2_write_phy(bp, 0x17, 0x0f01);
4238 bnx2_read_phy(bp, 0x15, &phy2);
4239 if (phy2 & 0x20) {
4240 u32 bmcr;
4242 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4243 bmcr |= BMCR_ANENABLE;
4244 bnx2_write_phy(bp, MII_BMCR, bmcr);
4246 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4247 }
4248 }
4249 else
4250 bp->current_interval = bp->timer_interval;
4251 spin_unlock(&bp->phy_lock);
4252 }
4255 bnx2_5708_serdes_timer(struct bnx2 *bp)
4257 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4258 bp->serdes_an_pending = 0;
4259 return;
4260 }
4262 spin_lock(&bp->phy_lock);
4263 if (bp->serdes_an_pending)
4264 bp->serdes_an_pending--;
4265 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4266 u32 bmcr;
4268 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4270 if (bmcr & BMCR_ANENABLE) {
4271 bmcr &= ~BMCR_ANENABLE;
4272 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4273 bnx2_write_phy(bp, MII_BMCR, bmcr);
4274 bp->current_interval = SERDES_FORCED_TIMEOUT;
4275 } else {
4276 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4277 bmcr |= BMCR_ANENABLE;
4278 bnx2_write_phy(bp, MII_BMCR, bmcr);
4279 bp->serdes_an_pending = 2;
4280 bp->current_interval = bp->timer_interval;
4281 }
4282 }
4283 else
4284 bp->current_interval = bp->timer_interval;
4286 spin_unlock(&bp->phy_lock);
4287 }
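/* Periodic driver timer: writes the driver pulse so the bootcode knows
 * the driver is alive, refreshes the firmware RX drop counter, and runs
 * the SerDes parallel-detect workarounds above.
 */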
4290 bnx2_timer(unsigned long data)
4292 struct bnx2 *bp = (struct bnx2 *) data;
4295 if (!netif_running(bp->dev))
4296 return;
4298 if (atomic_read(&bp->intr_sem) != 0)
4299 goto bnx2_restart_timer;
4301 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4302 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4304 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4306 if (bp->phy_flags & PHY_SERDES_FLAG) {
4307 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4308 bnx2_5706_serdes_timer(bp);
4309 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4310 bnx2_5708_serdes_timer(bp);
4311 }
4313 bnx2_restart_timer:
4314 mod_timer(&bp->timer, jiffies + bp->current_interval);
4315 }
4317 /* Called with rtnl_lock */
4319 bnx2_open(struct net_device *dev)
4321 struct bnx2 *bp = netdev_priv(dev);
4324 bnx2_set_power_state(bp, PCI_D0);
4325 bnx2_disable_int(bp);
4327 rc = bnx2_alloc_mem(bp);
4328 if (rc)
4329 return rc;
4331 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4332 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4333 !disable_msi) {
4335 if (pci_enable_msi(bp->pdev) == 0) {
4336 bp->flags |= USING_MSI_FLAG;
4337 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4338 dev);
4339 }
4340 else {
4341 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4342 IRQF_SHARED, dev->name, dev);
4343 }
4344 }
4345 else {
4346 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4347 dev->name, dev);
4348 }
4349 if (rc) {
4350 bnx2_free_mem(bp);
4351 return rc;
4352 }
4354 rc = bnx2_init_nic(bp);
4356 if (rc) {
4357 free_irq(bp->pdev->irq, dev);
4358 if (bp->flags & USING_MSI_FLAG) {
4359 pci_disable_msi(bp->pdev);
4360 bp->flags &= ~USING_MSI_FLAG;
4361 }
4362 bnx2_free_skbs(bp);
4363 bnx2_free_mem(bp);
4364 return rc;
4365 }
4367 mod_timer(&bp->timer, jiffies + bp->current_interval);
4369 atomic_set(&bp->intr_sem, 0);
4371 bnx2_enable_int(bp);
4373 if (bp->flags & USING_MSI_FLAG) {
4374 /* Test MSI to make sure it is working.
4375 * If the MSI test fails, go back to INTx mode.
4376 */
4377 if (bnx2_test_intr(bp) != 0) {
4378 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4379 " using MSI, switching to INTx mode. Please"
4380 " report this failure to the PCI maintainer"
4381 " and include system chipset information.\n",
4384 bnx2_disable_int(bp);
4385 free_irq(bp->pdev->irq, dev);
4386 pci_disable_msi(bp->pdev);
4387 bp->flags &= ~USING_MSI_FLAG;
4389 rc = bnx2_init_nic(bp);
4392 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4393 IRQF_SHARED, dev->name, dev);
4398 del_timer_sync(&bp->timer);
4401 bnx2_enable_int(bp);
4404 if (bp->flags & USING_MSI_FLAG) {
4405 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4408 netif_start_queue(dev);
4414 bnx2_reset_task(struct work_struct *work)
4416 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4418 if (!netif_running(bp->dev))
4419 return;
4421 bp->in_reset_task = 1;
4422 bnx2_netif_stop(bp);
4424 bnx2_init_nic(bp);
4426 atomic_set(&bp->intr_sem, 1);
4427 bnx2_netif_start(bp);
4428 bp->in_reset_task = 0;
4432 bnx2_tx_timeout(struct net_device *dev)
4434 struct bnx2 *bp = netdev_priv(dev);
4436 /* This allows the netif to be shutdown gracefully before resetting */
4437 schedule_work(&bp->reset_task);
4441 /* Called with rtnl_lock */
4443 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4445 struct bnx2 *bp = netdev_priv(dev);
4447 bnx2_netif_stop(bp);
4449 bp->vlgrp = vlgrp;
4450 bnx2_set_rx_mode(dev);
4452 bnx2_netif_start(bp);
4453 }
4455 /* Called with rtnl_lock */
4457 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4459 struct bnx2 *bp = netdev_priv(dev);
4461 bnx2_netif_stop(bp);
4463 if (bp->vlgrp)
4464 bp->vlgrp->vlan_devices[vid] = NULL;
4465 bnx2_set_rx_mode(dev);
4467 bnx2_netif_start(bp);
4471 /* Called with netif_tx_lock.
4472 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4473 * netif_wake_queue().
4476 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4478 struct bnx2 *bp = netdev_priv(dev);
4481 struct sw_bd *tx_buf;
4482 u32 len, vlan_tag_flags, last_frag, mss;
4483 u16 prod, ring_prod;
4486 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4487 netif_stop_queue(dev);
4488 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4491 return NETDEV_TX_BUSY;
4493 len = skb_headlen(skb);
4494 prod = bp->tx_prod;
4495 ring_prod = TX_RING_IDX(prod);
4497 vlan_tag_flags = 0;
4498 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4499 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4500 }
4502 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4504 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4507 if ((mss = skb_shinfo(skb)->gso_size) &&
4508 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4509 u32 tcp_opt_len, ip_tcp_len;
4511 if (skb_header_cloned(skb) &&
4512 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4513 dev_kfree_skb(skb);
4514 return NETDEV_TX_OK;
4515 }
4517 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4518 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4520 tcp_opt_len = 0;
4521 if (skb->h.th->doff > 5) {
4522 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4523 }
4524 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4526 skb->nh.iph->check = 0;
4527 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4528 skb->h.th->check =
4529 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4530 skb->nh.iph->daddr,
4531 0, IPPROTO_TCP, 0);
4533 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4534 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4535 (tcp_opt_len >> 2)) << 8;
4536 }
4537 }
4538 else
4539 {
4540 mss = 0;
4541 }
4544 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4546 tx_buf = &bp->tx_buf_ring[ring_prod];
4547 tx_buf->skb = skb;
4548 pci_unmap_addr_set(tx_buf, mapping, mapping);
4550 txbd = &bp->tx_desc_ring[ring_prod];
4552 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4553 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4554 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4555 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4557 last_frag = skb_shinfo(skb)->nr_frags;
4559 for (i = 0; i < last_frag; i++) {
4560 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4562 prod = NEXT_TX_BD(prod);
4563 ring_prod = TX_RING_IDX(prod);
4564 txbd = &bp->tx_desc_ring[ring_prod];
4566 len = frag->size;
4567 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4568 len, PCI_DMA_TODEVICE);
4569 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4570 mapping, mapping);
4572 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4573 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4574 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4575 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4576 }
4578 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4580 prod = NEXT_TX_BD(prod);
4581 bp->tx_prod_bseq += skb->len;
4583 REG_WR16(bp, bp->tx_bidx_addr, prod);
4584 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4586 mmiowb();
4588 bp->tx_prod = prod;
4589 dev->trans_start = jiffies;
4591 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4592 netif_stop_queue(dev);
4593 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4594 netif_wake_queue(dev);
4595 }
4597 return NETDEV_TX_OK;
4598 }
4600 /* Called with rtnl_lock */
4602 bnx2_close(struct net_device *dev)
4604 struct bnx2 *bp = netdev_priv(dev);
4607 /* Calling flush_scheduled_work() may deadlock because
4608 * linkwatch_event() may be on the workqueue and it will try to get
4609 * the rtnl_lock which we are holding.
4610 */
4611 while (bp->in_reset_task)
4612 msleep(1);
4614 bnx2_netif_stop(bp);
4615 del_timer_sync(&bp->timer);
4616 if (bp->flags & NO_WOL_FLAG)
4617 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4618 else if (bp->wol)
4619 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4620 else
4621 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4622 bnx2_reset_chip(bp, reset_code);
4623 free_irq(bp->pdev->irq, dev);
4624 if (bp->flags & USING_MSI_FLAG) {
4625 pci_disable_msi(bp->pdev);
4626 bp->flags &= ~USING_MSI_FLAG;
4627 }
4628 bnx2_free_skbs(bp);
4629 bnx2_free_mem(bp);
4630 bp->link_up = 0;
4631 netif_carrier_off(bp->dev);
4632 bnx2_set_power_state(bp, PCI_D3hot);
4633 return 0;
4634 }
4636 #define GET_NET_STATS64(ctr) \
4637 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4638 (unsigned long) (ctr##_lo)
4640 #define GET_NET_STATS32(ctr) \
4641 (ctr##_lo)
4643 #if (BITS_PER_LONG == 64)
4644 #define GET_NET_STATS GET_NET_STATS64
4646 #define GET_NET_STATS GET_NET_STATS32
4647 #endif
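/* On 64-bit hosts the full hi:lo counter pair fits in an unsigned long;
 * on 32-bit hosts only the low 32 bits of each hardware counter are
 * reported, since that is all the net_device_stats fields can hold.
 */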
4649 static struct net_device_stats *
4650 bnx2_get_stats(struct net_device *dev)
4652 struct bnx2 *bp = netdev_priv(dev);
4653 struct statistics_block *stats_blk = bp->stats_blk;
4654 struct net_device_stats *net_stats = &bp->net_stats;
4656 if (bp->stats_blk == NULL) {
4657 return net_stats;
4658 }
4659 net_stats->rx_packets =
4660 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4661 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4662 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4664 net_stats->tx_packets =
4665 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4666 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4667 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4669 net_stats->rx_bytes =
4670 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4672 net_stats->tx_bytes =
4673 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4675 net_stats->multicast =
4676 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4678 net_stats->collisions =
4679 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4681 net_stats->rx_length_errors =
4682 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4683 stats_blk->stat_EtherStatsOverrsizePkts);
4685 net_stats->rx_over_errors =
4686 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4688 net_stats->rx_frame_errors =
4689 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4691 net_stats->rx_crc_errors =
4692 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4694 net_stats->rx_errors = net_stats->rx_length_errors +
4695 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4696 net_stats->rx_crc_errors;
4698 net_stats->tx_aborted_errors =
4699 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4700 stats_blk->stat_Dot3StatsLateCollisions);
4702 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4703 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4704 net_stats->tx_carrier_errors = 0;
4705 else {
4706 net_stats->tx_carrier_errors =
4707 (unsigned long)
4708 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4709 }
4711 net_stats->tx_errors =
4712 (unsigned long)
4713 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4714 +
4715 net_stats->tx_aborted_errors +
4716 net_stats->tx_carrier_errors;
4718 net_stats->rx_missed_errors =
4719 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4720 stats_blk->stat_FwRxDrop);
4722 return net_stats;
4723 }
4725 /* All ethtool functions called with rtnl_lock */
4728 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4730 struct bnx2 *bp = netdev_priv(dev);
4732 cmd->supported = SUPPORTED_Autoneg;
4733 if (bp->phy_flags & PHY_SERDES_FLAG) {
4734 cmd->supported |= SUPPORTED_1000baseT_Full |
4735 SUPPORTED_FIBRE;
4737 cmd->port = PORT_FIBRE;
4738 }
4739 else {
4740 cmd->supported |= SUPPORTED_10baseT_Half |
4741 SUPPORTED_10baseT_Full |
4742 SUPPORTED_100baseT_Half |
4743 SUPPORTED_100baseT_Full |
4744 SUPPORTED_1000baseT_Full |
4745 SUPPORTED_TP;
4747 cmd->port = PORT_TP;
4748 }
4750 cmd->advertising = bp->advertising;
4752 if (bp->autoneg & AUTONEG_SPEED) {
4753 cmd->autoneg = AUTONEG_ENABLE;
4754 }
4755 else {
4756 cmd->autoneg = AUTONEG_DISABLE;
4757 }
4759 if (netif_carrier_ok(dev)) {
4760 cmd->speed = bp->line_speed;
4761 cmd->duplex = bp->duplex;
4762 }
4763 else {
4764 cmd->speed = -1;
4765 cmd->duplex = -1;
4766 }
4768 cmd->transceiver = XCVR_INTERNAL;
4769 cmd->phy_address = bp->phy_addr;
4771 return 0;
4772 }
4775 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4777 struct bnx2 *bp = netdev_priv(dev);
4778 u8 autoneg = bp->autoneg;
4779 u8 req_duplex = bp->req_duplex;
4780 u16 req_line_speed = bp->req_line_speed;
4781 u32 advertising = bp->advertising;
4783 if (cmd->autoneg == AUTONEG_ENABLE) {
4784 autoneg |= AUTONEG_SPEED;
4786 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4788 /* allow advertising 1 speed */
4789 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4790 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4791 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4792 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4794 if (bp->phy_flags & PHY_SERDES_FLAG)
4797 advertising = cmd->advertising;
4800 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4801 advertising = cmd->advertising;
4803 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4807 if (bp->phy_flags & PHY_SERDES_FLAG) {
4808 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4811 advertising = ETHTOOL_ALL_COPPER_SPEED;
4814 advertising |= ADVERTISED_Autoneg;
4817 if (bp->phy_flags & PHY_SERDES_FLAG) {
4818 if ((cmd->speed != SPEED_1000 &&
4819 cmd->speed != SPEED_2500) ||
4820 (cmd->duplex != DUPLEX_FULL))
4823 if (cmd->speed == SPEED_2500 &&
4824 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4827 else if (cmd->speed == SPEED_1000) {
4830 autoneg &= ~AUTONEG_SPEED;
4831 req_line_speed = cmd->speed;
4832 req_duplex = cmd->duplex;
4836 bp->autoneg = autoneg;
4837 bp->advertising = advertising;
4838 bp->req_line_speed = req_line_speed;
4839 bp->req_duplex = req_duplex;
4841 spin_lock_bh(&bp->phy_lock);
4843 bnx2_setup_phy(bp);
4845 spin_unlock_bh(&bp->phy_lock);
4847 return 0;
4848 }
4851 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4853 struct bnx2 *bp = netdev_priv(dev);
4855 strcpy(info->driver, DRV_MODULE_NAME);
4856 strcpy(info->version, DRV_MODULE_VERSION);
4857 strcpy(info->bus_info, pci_name(bp->pdev));
4858 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4859 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4860 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4861 info->fw_version[1] = info->fw_version[3] = '.';
4862 info->fw_version[5] = 0;
4865 #define BNX2_REGDUMP_LEN (32 * 1024)
4868 bnx2_get_regs_len(struct net_device *dev)
4870 return BNX2_REGDUMP_LEN;
4874 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4876 u32 *p = _p, i, offset;
4878 struct bnx2 *bp = netdev_priv(dev);
4879 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4880 0x0800, 0x0880, 0x0c00, 0x0c10,
4881 0x0c30, 0x0d08, 0x1000, 0x101c,
4882 0x1040, 0x1048, 0x1080, 0x10a4,
4883 0x1400, 0x1490, 0x1498, 0x14f0,
4884 0x1500, 0x155c, 0x1580, 0x15dc,
4885 0x1600, 0x1658, 0x1680, 0x16d8,
4886 0x1800, 0x1820, 0x1840, 0x1854,
4887 0x1880, 0x1894, 0x1900, 0x1984,
4888 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4889 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4890 0x2000, 0x2030, 0x23c0, 0x2400,
4891 0x2800, 0x2820, 0x2830, 0x2850,
4892 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4893 0x3c00, 0x3c94, 0x4000, 0x4010,
4894 0x4080, 0x4090, 0x43c0, 0x4458,
4895 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4896 0x4fc0, 0x5010, 0x53c0, 0x5444,
4897 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4898 0x5fc0, 0x6000, 0x6400, 0x6428,
4899 0x6800, 0x6848, 0x684c, 0x6860,
4900 0x6888, 0x6910, 0x8000 };
4904 memset(p, 0, BNX2_REGDUMP_LEN);
4906 if (!netif_running(bp->dev))
4910 offset = reg_boundaries[0];
4912 while (offset < BNX2_REGDUMP_LEN) {
4913 *p++ = REG_RD(bp, offset);
4915 if (offset == reg_boundaries[i + 1]) {
4916 offset = reg_boundaries[i + 2];
4917 p = (u32 *) (orig_p + offset);
4924 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4926 struct bnx2 *bp = netdev_priv(dev);
4928 if (bp->flags & NO_WOL_FLAG) {
4929 wol->supported = 0;
4930 wol->wolopts = 0;
4931 }
4932 else {
4933 wol->supported = WAKE_MAGIC;
4934 if (bp->wol)
4935 wol->wolopts = WAKE_MAGIC;
4936 else
4937 wol->wolopts = 0;
4938 }
4939 memset(&wol->sopass, 0, sizeof(wol->sopass));
4940 }
4943 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4945 struct bnx2 *bp = netdev_priv(dev);
4947 if (wol->wolopts & ~WAKE_MAGIC)
4948 return -EINVAL;
4950 if (wol->wolopts & WAKE_MAGIC) {
4951 if (bp->flags & NO_WOL_FLAG)
4952 return -EINVAL;
4954 bp->wol = 1;
4955 }
4956 else {
4957 bp->wol = 0;
4958 }
4959 return 0;
4960 }
4963 bnx2_nway_reset(struct net_device *dev)
4965 struct bnx2 *bp = netdev_priv(dev);
4968 if (!(bp->autoneg & AUTONEG_SPEED)) {
4969 return -EINVAL;
4970 }
4972 spin_lock_bh(&bp->phy_lock);
4974 /* Force a link down visible on the other side */
4975 if (bp->phy_flags & PHY_SERDES_FLAG) {
4976 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4977 spin_unlock_bh(&bp->phy_lock);
4979 msleep(20);
4981 spin_lock_bh(&bp->phy_lock);
4983 bp->current_interval = SERDES_AN_TIMEOUT;
4984 bp->serdes_an_pending = 1;
4985 mod_timer(&bp->timer, jiffies + bp->current_interval);
4986 }
4988 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4989 bmcr &= ~BMCR_LOOPBACK;
4990 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4992 spin_unlock_bh(&bp->phy_lock);
4994 return 0;
4995 }
4998 bnx2_get_eeprom_len(struct net_device *dev)
5000 struct bnx2 *bp = netdev_priv(dev);
5002 if (bp->flash_info == NULL)
5003 return 0;
5005 return (int) bp->flash_size;
5009 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5012 struct bnx2 *bp = netdev_priv(dev);
5015 /* parameters already validated in ethtool_get_eeprom */
5017 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5019 return rc;
5020 }
5023 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5026 struct bnx2 *bp = netdev_priv(dev);
5029 /* parameters already validated in ethtool_set_eeprom */
5031 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5033 return rc;
5034 }
5037 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5039 struct bnx2 *bp = netdev_priv(dev);
5041 memset(coal, 0, sizeof(struct ethtool_coalesce));
5043 coal->rx_coalesce_usecs = bp->rx_ticks;
5044 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5045 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5046 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5048 coal->tx_coalesce_usecs = bp->tx_ticks;
5049 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5050 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5051 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5053 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5055 return 0;
5056 }
5059 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5061 struct bnx2 *bp = netdev_priv(dev);
5063 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5064 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5066 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5067 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5069 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5070 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5072 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5073 if (bp->rx_quick_cons_trip_int > 0xff)
5074 bp->rx_quick_cons_trip_int = 0xff;
5076 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5077 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5079 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5080 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5082 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5083 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5085 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5086 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5089 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5090 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5091 bp->stats_ticks &= 0xffff00;
5093 if (netif_running(bp->dev)) {
5094 bnx2_netif_stop(bp);
5095 bnx2_init_nic(bp);
5096 bnx2_netif_start(bp);
5097 }
5099 return 0;
5100 }
5103 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5105 struct bnx2 *bp = netdev_priv(dev);
5107 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5108 ering->rx_mini_max_pending = 0;
5109 ering->rx_jumbo_max_pending = 0;
5111 ering->rx_pending = bp->rx_ring_size;
5112 ering->rx_mini_pending = 0;
5113 ering->rx_jumbo_pending = 0;
5115 ering->tx_max_pending = MAX_TX_DESC_CNT;
5116 ering->tx_pending = bp->tx_ring_size;
5120 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5122 struct bnx2 *bp = netdev_priv(dev);
5124 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5125 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5126 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5128 return -EINVAL;
5129 }
5130 if (netif_running(bp->dev)) {
5131 bnx2_netif_stop(bp);
5132 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5137 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5138 bp->tx_ring_size = ering->tx_pending;
5140 if (netif_running(bp->dev)) {
5143 rc = bnx2_alloc_mem(bp);
5147 bnx2_netif_start(bp);
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);
	bnx2_setup_phy(bp);
	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
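/* Usage sketch (assumes ethtool(8) and an interface named eth0):
 *
 *   ethtool -A eth0 autoneg on rx on tx on
 *
 * The requested settings only take effect once bnx2_setup_phy()
 * reprograms the PHY under bp->phy_lock, as above.
 */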
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
#ifdef BCM_TSO
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
#endif
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
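/* Worked example (illustrative only): the statistics block is viewed as an
 * array of 32-bit words, so dividing the byte offset of a member by 4
 * yields a word index.  If stat_IfHCInOctets_hi were at byte offset 8,
 * STATS_OFFSET32(stat_IfHCInOctets_hi) would be 2 and
 * *(hw_stats + 2) would read that word.  The offset value 8 here is an
 * assumed example, not taken from the real statistics_block layout.
 */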
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
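/* Usage sketch (assumes ethtool(8) and an interface named eth0):
 *
 *   ethtool -t eth0 offline   # all six tests; disrupts traffic
 *   ethtool -t eth0 online    # only the nvram, interrupt and link tests
 *
 * A non-zero value in buf[] marks the corresponding entry of
 * bnx2_tests_str_arr as failed in the ethtool output.
 */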
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
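/* Example of the 64-bit composition above (illustrative values): if the
 * _hi word of a counter reads 0x1 and the following _lo word reads
 * 0x2345, the reported value is ((u64) 0x1 << 32) + 0x2345 = 0x100002345.
 * "ethtool -S eth0" prints these values against the names in
 * bnx2_stats_str_arr (eth0 being an assumed interface name).
 */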
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
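/* Usage sketch (assumes ethtool(8) and an interface named eth0):
 *
 *   ethtool -p eth0 5
 *
 * blinks the port LED for 5 seconds; each loop iteration above toggles
 * the LED every 500 ms, so "data" seconds take data * 2 iterations.
 */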
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2_set_tso,
#endif
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;
		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
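/* Userspace sketch of the MII ioctls handled above (illustrative only;
 * error handling omitted and "eth0" is an assumed interface name):
 *
 *   struct ifreq ifr = { 0 };
 *   struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr.ifr_data;
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ioctl(fd, SIOCGMIIPHY, &ifr);    // fills mii->phy_id
 *   mii->reg_num = 1;                // MII status register
 *   ioctl(fd, SIOCGMIIREG, &ifr);    // result in mii->val_out
 *
 * where fd is any AF_INET socket descriptor.
 */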
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
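/* Usage sketch (assumes iproute2 and an interface named eth0):
 *
 *   ip link set dev eth0 mtu 9000
 *
 * Any MTU whose on-wire frame fits between MIN_ETHERNET_PACKET_SIZE and
 * MAX_ETHERNET_JUMBO_PACKET_SIZE is accepted; a running interface is
 * re-initialized so buffers match the new size.
 */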
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;
	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);
	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}
	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}
	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}
	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;
	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
			bp->phy_flags |= PHY_SERDES_FLAG;
	} else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI
	 * writes with byte enables disabled on the unused 32-bit word.
	 * This is legal but causes problems on the AMD 8132 which will
	 * eventually stop responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it
	 * using pci_msi_quirk.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
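/* Usage sketch (assumes the module is built and installed):
 *
 *   modprobe bnx2                # autoloads via bnx2_pci_tbl on matching HW
 *   modprobe bnx2 disable_msi=1  # force legacy INTx instead of MSI
 *
 * The disable_msi parameter corresponds to the module_param() declared
 * near the top of this file; the AMD 8132 workaround in bnx2_init_board()
 * may also set it at probe time.
 */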