/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0.1"
#define DRV_MODULE_RELDATE	"May 6, 2009"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-4.6.16.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-4.6.16.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-4.6.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-4.6.15.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
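/* HZ is the number of jiffies per second, so TX_TIMEOUT above is five
 * seconds: the net core invokes the driver's tx_timeout handler when a
 * transmit queue has been stopped for longer than this.
 */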
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
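/* MODULE_FIRMWARE() records the firmware blob names in the module info
 * section so packaging tools know which files under /lib/firmware the
 * driver will request at run time via request_firmware().
 */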
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
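/* Usage sketch: loading the module with MSI disabled makes the driver
 * fall back to legacy INTx interrupts, e.g.:
 *
 *	modprobe bnx2 disable_msi=1
 */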
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, },
};
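/* The PCI core matches this table in order, so the HP-branded entries
 * with explicit subsystem IDs must precede the generic PCI_ANY_ID
 * entries for the same device ID, or the generic board name would win.
 */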
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
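/* Worked example for the arithmetic above: the producer and consumer
 * indices are 16-bit counters that wrap, and the ring has 256 slots of
 * which 255 are usable (TX_DESC_CNT == 256, MAX_TX_DESC_CNT == 255).
 * With tx_prod == 5 and tx_cons == 65531, the u32 subtraction
 * underflows, the & 0xffff brings diff back to 10, and with the
 * default tx_ring_size of 255 there are 255 - 10 == 245 descriptors
 * still available.
 */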
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
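/* The helpers above implement indirect register access: the target
 * offset is written to the PCICFG window address register and the data
 * then moves through the window data register, with indirect_lock
 * serializing the two-step sequence.  The shmem variants simply rebase
 * the offset to the firmware's shared memory region (bp->shmem_base).
 */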
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
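/* Each MSI-X vector owns a slice of the status block allocation,
 * spaced BNX2_SBLK_MSIX_ALIGN_SIZE bytes apart, which is why the CNIC
 * irq info above points sb_id slices past status_blk.msi.
 */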
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	rcu_read_unlock();
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	rcu_read_unlock();
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
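/* Layout of the MDIO COMM word built by both PHY accessors above
 * (a clause-22 management frame):
 *
 *	bits 21-25  PHY address  (bp->phy_addr << 21)
 *	bits 16-20  register     (reg << 16)
 *	bits  0-15  data         (writes only)
 *
 * START_BUSY kicks off the transaction and is polled (up to 50 x 10us)
 * until the EMAC clears it; -EBUSY is returned on timeout.
 */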
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
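/* Writing INT_ACK_CMD with INDEX_VALID and the last seen status index
 * acks events up to that index; the first write keeps MASK_INT set so
 * the vector stays masked until the second, unmasking write lands.
 * COAL_NOW asks the host coalescing block for an immediate status
 * block update so nothing is lost from the window while interrupts
 * were disabled.
 */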
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}
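/* intr_sem is elevated by bnx2_disable_int_sync() and dropped here;
 * interrupts, NAPI, and the tx queues only come back once the count
 * returns to zero, so nested stop/start pairs stay balanced.
 */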
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		if (rxr->rx_buf_ring)
			vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		if (rxr->rx_pg_ring)
			vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
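	/* Resulting DMA layout of the combined allocation:
	 *
	 *	status_blk_mapping + 0               -> status block(s)
	 *	status_blk_mapping + status_blk_size -> statistics block
	 *
	 * so a single pci_alloc_consistent() call backs both pointers.
	 */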
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
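/* Summary of the resolution above (IEEE 802.3 Annex 28B, as cited):
 *
 *	local PAUSE  local ASYM  remote PAUSE  remote ASYM  result
 *	    1            x            1             x       TX | RX
 *	    1            1            0             1       RX only
 *	    0            1            1             1       TX only
 *
 * Any other combination yields no flow control.
 */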
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
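		/* The MII_CTRL1000 advertisement bits sit two positions
		 * below the matching MII_STAT1000 link-partner bits
		 * (e.g. ADVERTISE_1000FULL is 0x0200 while LPA_1000FULL
		 * is 0x0800), so shifting the partner word right by two
		 * aligns the two masks for the AND above.
		 */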
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static void
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bp->link_up = 0;
			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2395 bnx2_set_mac_loopback(struct bnx2 *bp)
2399 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2400 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2401 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2402 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407 static int bnx2_test_link(struct bnx2 *);
2410 bnx2_set_phy_loopback(struct bnx2 *bp)
2415 spin_lock_bh(&bp->phy_lock);
2416 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2418 spin_unlock_bh(&bp->phy_lock);
2422 for (i = 0; i < 10; i++) {
2423 if (bnx2_test_link(bp) == 0)
2428 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2429 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2430 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2431 BNX2_EMAC_MODE_25G_MODE);
2433 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2434 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
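/* bnx2_fw_sync() below implements the driver<->firmware mailbox
 * handshake: each request written to BNX2_DRV_MB carries a rolling
 * sequence number (bp->fw_wr_seq), and the firmware echoes that
 * sequence in the ack field of BNX2_FW_MB.  Matching the echoed
 * sequence against our own is what ties an ack to this particular
 * request; on timeout the driver rewrites the mailbox with a
 * FW_TIMEOUT code so the firmware knows the handshake was abandoned.
 */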
2440 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2446 msg_data |= bp->fw_wr_seq;
2448 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2453 /* wait for an acknowledgement. */
2454 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2457 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2459 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2462 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2465 /* If we timed out, inform the firmware that this is the case. */
2466 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2468 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2471 msg_data &= ~BNX2_DRV_MSG_CODE;
2472 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2474 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2479 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2486 bnx2_init_5709_context(struct bnx2 *bp)
2491 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2492 val |= (BCM_PAGE_BITS - 8) << 16;
2493 REG_WR(bp, BNX2_CTX_COMMAND, val);
2494 for (i = 0; i < 10; i++) {
2495 val = REG_RD(bp, BNX2_CTX_COMMAND);
2496 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2500 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2503 for (i = 0; i < bp->ctx_pages; i++) {
2507 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2511 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2512 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2513 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2514 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2515 (u64) bp->ctx_blk_mapping[i] >> 32);
2516 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2517 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2518 for (j = 0; j < 10; j++) {
2520 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2521 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2525 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2534 bnx2_init_context(struct bnx2 *bp)
2540 u32 vcid_addr, pcid_addr, offset;
2545 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2548 vcid_addr = GET_PCID_ADDR(vcid);
2550 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2555 pcid_addr = GET_PCID_ADDR(new_vcid);
2558 vcid_addr = GET_CID_ADDR(vcid);
2559 pcid_addr = vcid_addr;
2562 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2563 vcid_addr += (i << PHY_CTX_SHIFT);
2564 pcid_addr += (i << PHY_CTX_SHIFT);
2566 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2567 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2569 /* Zero out the context. */
2570 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2571 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2577 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2583 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2584 if (good_mbuf == NULL) {
2585 printk(KERN_ERR PFX "Failed to allocate memory in "
2586 "bnx2_alloc_bad_rbuf\n");
2590 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2591 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2595 /* Allocate a bunch of mbufs and save the good ones in an array. */
2596 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2597 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2598 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2599 BNX2_RBUF_COMMAND_ALLOC_REQ);
2601 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2603 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2605 /* The addresses with Bit 9 set are bad memory blocks. */
2606 if (!(val & (1 << 9))) {
2607 good_mbuf[good_mbuf_cnt] = (u16) val;
2611 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2614 /* Free the good ones back to the mbuf pool, thus discarding
2615 * all the bad ones. */
2616 while (good_mbuf_cnt) {
2619 val = good_mbuf[good_mbuf_cnt];
2620 val = (val << 9) | val | 1;
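		/* The free command appears to encode the mbuf index twice --
		 * once at bit 9 and up, once in the low bits -- with bit 0
		 * set.  This shape is read off the arithmetic above; the
		 * register format itself is not documented in this file.
		 */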
2622 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2629 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2633 val = (mac_addr[0] << 8) | mac_addr[1];
2635 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2637 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2638 (mac_addr[4] << 8) | mac_addr[5];
2640 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
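/* A minimal sketch of the packing above: octets 0-1 land in the low
 * 16 bits of MATCH0 and octets 2-5 fill MATCH1, so one filter slot is
 * an 8-byte register pair -- hence the (pos * 8) stride.  The helper
 * name is illustrative only; the driver programs the real registers
 * through REG_WR().
 */
static inline void bnx2_mac_match_pack(const u8 *mac, u32 *m0, u32 *m1)
{
	*m0 = (mac[0] << 8) | mac[1];
	*m1 = ((u32) mac[2] << 24) | (mac[3] << 16) |
	      (mac[4] << 8) | mac[5];
}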
2644 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2647 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2648 struct rx_bd *rxbd =
2649 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2650 struct page *page = alloc_page(GFP_ATOMIC);
2654 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2655 PCI_DMA_FROMDEVICE);
2656 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2662 pci_unmap_addr_set(rx_pg, mapping, mapping);
2663 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2664 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2669 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2671 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2672 struct page *page = rx_pg->page;
2677 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2678 PCI_DMA_FROMDEVICE);
2685 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2687 struct sk_buff *skb;
2688 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2690 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2691 unsigned long align;
2693 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2698 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2699 skb_reserve(skb, BNX2_RX_ALIGN - align);
2701 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2702 PCI_DMA_FROMDEVICE);
2703 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2709 pci_unmap_addr_set(rx_buf, mapping, mapping);
2711 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2712 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2714 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2720 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2722 struct status_block *sblk = bnapi->status_blk.msi;
2723 u32 new_link_state, old_link_state;
2726 new_link_state = sblk->status_attn_bits & event;
2727 old_link_state = sblk->status_attn_bits_ack & event;
2728 if (new_link_state != old_link_state) {
2730 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2732 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2740 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2742 spin_lock(&bp->phy_lock);
2744 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2746 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2747 bnx2_set_remote_link(bp);
2749 spin_unlock(&bp->phy_lock);
2754 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2758 /* Tell compiler that status block fields can change. */
2760 cons = *bnapi->hw_tx_cons_ptr;
2762 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2768 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2770 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2771 u16 hw_cons, sw_cons, sw_ring_cons;
2772 int tx_pkt = 0, index;
2773 struct netdev_queue *txq;
2775 index = (bnapi - bp->bnx2_napi);
2776 txq = netdev_get_tx_queue(bp->dev, index);
2778 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2779 sw_cons = txr->tx_cons;
2781 while (sw_cons != hw_cons) {
2782 struct sw_tx_bd *tx_buf;
2783 struct sk_buff *skb;
2786 sw_ring_cons = TX_RING_IDX(sw_cons);
2788 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2791 /* partial BD completions possible with TSO packets */
2792 if (skb_is_gso(skb)) {
2793 u16 last_idx, last_ring_idx;
2795 last_idx = sw_cons +
2796 skb_shinfo(skb)->nr_frags + 1;
2797 last_ring_idx = sw_ring_cons +
2798 skb_shinfo(skb)->nr_frags + 1;
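			/* In the checks below, the ring index is wrapped
			 * back into range, and the s16 casts turn the raw
			 * 16-bit index difference into a signed distance so
			 * the comparison stays correct when the consumer
			 * index wraps past 0xffff: a positive result means
			 * the last BD of this packet is not yet completed.
			 */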
2799 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2802 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2807 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
2810 last = skb_shinfo(skb)->nr_frags;
2812 for (i = 0; i < last; i++) {
2813 sw_cons = NEXT_TX_BD(sw_cons);
2816 sw_cons = NEXT_TX_BD(sw_cons);
2820 if (tx_pkt == budget)
2823 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2826 txr->hw_tx_cons = hw_cons;
2827 txr->tx_cons = sw_cons;
2829 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2830 * before checking for netif_tx_queue_stopped(). Without the
2831 * memory barrier, there is a small possibility that bnx2_start_xmit()
2832 * will miss it and cause the queue to be stopped forever.
2836 if (unlikely(netif_tx_queue_stopped(txq)) &&
2837 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2838 __netif_tx_lock(txq, smp_processor_id());
2839 if ((netif_tx_queue_stopped(txq)) &&
2840 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2841 netif_tx_wake_queue(txq);
2842 __netif_tx_unlock(txq);
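	/* The stopped test is repeated with the tx lock held because
	 * bnx2_start_xmit() can stop the queue between the lock-free
	 * check and the wake; re-checking under __netif_tx_lock() avoids
	 * waking a queue that has just legitimately run out of BDs.
	 */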
2849 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2850 struct sk_buff *skb, int count)
2852 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2853 struct rx_bd *cons_bd, *prod_bd;
2856 u16 cons = rxr->rx_pg_cons;
2858 cons_rx_pg = &rxr->rx_pg_ring[cons];
2860 /* The caller was unable to allocate a new page to replace the
2861 * last one in the frags array, so we need to recycle that page
2862 * and then free the skb.
2866 struct skb_shared_info *shinfo;
2868 shinfo = skb_shinfo(skb);
2870 page = shinfo->frags[shinfo->nr_frags].page;
2871 shinfo->frags[shinfo->nr_frags].page = NULL;
2873 cons_rx_pg->page = page;
2877 hw_prod = rxr->rx_pg_prod;
2879 for (i = 0; i < count; i++) {
2880 prod = RX_PG_RING_IDX(hw_prod);
2882 prod_rx_pg = &rxr->rx_pg_ring[prod];
2883 cons_rx_pg = &rxr->rx_pg_ring[cons];
2884 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2885 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2888 prod_rx_pg->page = cons_rx_pg->page;
2889 cons_rx_pg->page = NULL;
2890 pci_unmap_addr_set(prod_rx_pg, mapping,
2891 pci_unmap_addr(cons_rx_pg, mapping));
2893 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2894 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2897 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2898 hw_prod = NEXT_RX_BD(hw_prod);
2900 rxr->rx_pg_prod = hw_prod;
2901 rxr->rx_pg_cons = cons;
2905 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2906 struct sk_buff *skb, u16 cons, u16 prod)
2908 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2909 struct rx_bd *cons_bd, *prod_bd;
2911 cons_rx_buf = &rxr->rx_buf_ring[cons];
2912 prod_rx_buf = &rxr->rx_buf_ring[prod];
2914 pci_dma_sync_single_for_device(bp->pdev,
2915 pci_unmap_addr(cons_rx_buf, mapping),
2916 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2918 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2920 prod_rx_buf->skb = skb;
2925 pci_unmap_addr_set(prod_rx_buf, mapping,
2926 pci_unmap_addr(cons_rx_buf, mapping));
2928 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2929 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2930 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2931 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2935 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2936 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2940 u16 prod = ring_idx & 0xffff;
2942 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2943 if (unlikely(err)) {
2944 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2946 unsigned int raw_len = len + 4;
2947 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2949 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2954 skb_reserve(skb, BNX2_RX_OFFSET);
2955 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2956 PCI_DMA_FROMDEVICE);
2962 unsigned int i, frag_len, frag_size, pages;
2963 struct sw_pg *rx_pg;
2964 u16 pg_cons = rxr->rx_pg_cons;
2965 u16 pg_prod = rxr->rx_pg_prod;
2967 frag_size = len + 4 - hdr_len;
2968 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2969 skb_put(skb, hdr_len);
2971 for (i = 0; i < pages; i++) {
2972 dma_addr_t mapping_old;
2974 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2975 if (unlikely(frag_len <= 4)) {
2976 unsigned int tail = 4 - frag_len;
2978 rxr->rx_pg_cons = pg_cons;
2979 rxr->rx_pg_prod = pg_prod;
2980 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2987 &skb_shinfo(skb)->frags[i - 1];
2989 skb->data_len -= tail;
2990 skb->truesize -= tail;
2994 rx_pg = &rxr->rx_pg_ring[pg_cons];
2996 /* Don't unmap yet. If we're unable to allocate a new
2997 * page, we need to recycle the page and the DMA addr.
2999 mapping_old = pci_unmap_addr(rx_pg, mapping);
3003 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3006 err = bnx2_alloc_rx_page(bp, rxr,
3007 RX_PG_RING_IDX(pg_prod));
3008 if (unlikely(err)) {
3009 rxr->rx_pg_cons = pg_cons;
3010 rxr->rx_pg_prod = pg_prod;
3011 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3016 pci_unmap_page(bp->pdev, mapping_old,
3017 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3019 frag_size -= frag_len;
3020 skb->data_len += frag_len;
3021 skb->truesize += frag_len;
3022 skb->len += frag_len;
3024 pg_prod = NEXT_RX_BD(pg_prod);
3025 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3027 rxr->rx_pg_prod = pg_prod;
3028 rxr->rx_pg_cons = pg_cons;
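	/* Note: the "+ 4" in frag_size above re-includes the trailing
	 * frame CRC that the hardware DMAs after the payload (len itself
	 * excludes it), and the frag_len <= 4 branch trims those CRC
	 * bytes back off the final fragment so the skb ends at the true
	 * packet length.
	 */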
3034 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3038 /* Tell compiler that status block fields can change. */
3040 cons = *bnapi->hw_rx_cons_ptr;
3042 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3048 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3050 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3051 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3052 struct l2_fhdr *rx_hdr;
3053 int rx_pkt = 0, pg_ring_used = 0;
3055 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3056 sw_cons = rxr->rx_cons;
3057 sw_prod = rxr->rx_prod;
3059 /* Memory barrier necessary as speculative reads of the rx
3060 * buffer can be ahead of the index in the status block
3063 while (sw_cons != hw_cons) {
3064 unsigned int len, hdr_len;
3066 struct sw_bd *rx_buf;
3067 struct sk_buff *skb;
3068 dma_addr_t dma_addr;
3070 int hw_vlan __maybe_unused = 0;
3072 sw_ring_cons = RX_RING_IDX(sw_cons);
3073 sw_ring_prod = RX_RING_IDX(sw_prod);
3075 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3080 dma_addr = pci_unmap_addr(rx_buf, mapping);
3082 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3083 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3084 PCI_DMA_FROMDEVICE);
3086 rx_hdr = (struct l2_fhdr *) skb->data;
3087 len = rx_hdr->l2_fhdr_pkt_len;
3088 status = rx_hdr->l2_fhdr_status;
3091 if (status & L2_FHDR_STATUS_SPLIT) {
3092 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3094 } else if (len > bp->rx_jumbo_thresh) {
3095 hdr_len = bp->rx_jumbo_thresh;
3099 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3100 L2_FHDR_ERRORS_PHY_DECODE |
3101 L2_FHDR_ERRORS_ALIGNMENT |
3102 L2_FHDR_ERRORS_TOO_SHORT |
3103 L2_FHDR_ERRORS_GIANT_FRAME))) {
3105 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3110 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3112 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3119 if (len <= bp->rx_copy_thresh) {
3120 struct sk_buff *new_skb;
3122 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3123 if (new_skb == NULL) {
3124 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3130 skb_copy_from_linear_data_offset(skb,
3132 new_skb->data, len + 6);
3133 skb_reserve(new_skb, 6);
3134 skb_put(new_skb, len);
3136 bnx2_reuse_rx_skb(bp, rxr, skb,
3137 sw_ring_cons, sw_ring_prod);
3140 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3141 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3144 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3145 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3146 vtag = rx_hdr->l2_fhdr_vlan_tag;
3153 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3156 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3157 ve->h_vlan_proto = htons(ETH_P_8021Q);
3158 ve->h_vlan_TCI = htons(vtag);
3163 skb->protocol = eth_type_trans(skb, bp->dev);
3165 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3166 (ntohs(skb->protocol) != 0x8100)) {
3173 skb->ip_summed = CHECKSUM_NONE;
3175 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3176 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3178 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3179 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3180 skb->ip_summed = CHECKSUM_UNNECESSARY;
3183 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3187 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3190 netif_receive_skb(skb);
3195 sw_cons = NEXT_RX_BD(sw_cons);
3196 sw_prod = NEXT_RX_BD(sw_prod);
3198 if (rx_pkt == budget)
3201 /* Refresh hw_cons to see if there is new work */
3202 if (sw_cons == hw_cons) {
3203 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3207 rxr->rx_cons = sw_cons;
3208 rxr->rx_prod = sw_prod;
3211 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3213 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3215 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3223 /* MSI ISR - The only difference between this and the INTx ISR
3224 * is that the MSI interrupt is always serviced.
3227 bnx2_msi(int irq, void *dev_instance)
3229 struct bnx2_napi *bnapi = dev_instance;
3230 struct bnx2 *bp = bnapi->bp;
3232 prefetch(bnapi->status_blk.msi);
3233 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3234 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3235 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3237 /* Return here if interrupt is disabled. */
3238 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3241 napi_schedule(&bnapi->napi);
3247 bnx2_msi_1shot(int irq, void *dev_instance)
3249 struct bnx2_napi *bnapi = dev_instance;
3250 struct bnx2 *bp = bnapi->bp;
3252 prefetch(bnapi->status_blk.msi);
3254 /* Return here if interrupt is disabled. */
3255 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3258 napi_schedule(&bnapi->napi);
3264 bnx2_interrupt(int irq, void *dev_instance)
3266 struct bnx2_napi *bnapi = dev_instance;
3267 struct bnx2 *bp = bnapi->bp;
3268 struct status_block *sblk = bnapi->status_blk.msi;
3270 /* When using INTx, it is possible for the interrupt to arrive
3271 * at the CPU before the status block posted prior to the
3272 * interrupt. Reading a register will flush the status block.
3273 * When using MSI, the MSI message will always complete after
3274 * the status block write.
3276 if ((sblk->status_idx == bnapi->last_status_idx) &&
3277 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3278 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3281 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3282 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3283 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3285 /* Read back to deassert IRQ immediately to avoid too many
3286 * spurious interrupts.
3288 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3290 /* Return here if interrupt is shared and is disabled. */
3291 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3294 if (napi_schedule_prep(&bnapi->napi)) {
3295 bnapi->last_status_idx = sblk->status_idx;
3296 __napi_schedule(&bnapi->napi);
3303 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3305 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3306 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3308 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3309 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3314 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3315 STATUS_ATTN_BITS_TIMER_ABORT)
3318 bnx2_has_work(struct bnx2_napi *bnapi)
3320 struct status_block *sblk = bnapi->status_blk.msi;
3322 if (bnx2_has_fast_work(bnapi))
3326 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3330 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3331 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3338 bnx2_chk_missed_msi(struct bnx2 *bp)
3340 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3343 if (bnx2_has_work(bnapi)) {
3344 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3345 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3348 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3349 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3350 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3351 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3352 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
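			/* The register toggle above plus the manual call to
			 * bnx2_msi() recover from an MSI the chip dropped:
			 * work was pending, yet the status index never
			 * advanced since the previous idle check.
			 */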
3356 bp->idle_chk_status_idx = bnapi->last_status_idx;
3360 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3362 struct cnic_ops *c_ops;
3364 if (!bnapi->cnic_present)
3368 c_ops = rcu_dereference(bp->cnic_ops);
3370 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3371 bnapi->status_blk.msi);
3376 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3378 struct status_block *sblk = bnapi->status_blk.msi;
3379 u32 status_attn_bits = sblk->status_attn_bits;
3380 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3382 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3383 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3385 bnx2_phy_int(bp, bnapi);
3387 /* This is needed to take care of transient status
3388 * during link changes.
3390 REG_WR(bp, BNX2_HC_COMMAND,
3391 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3392 REG_RD(bp, BNX2_HC_COMMAND);
3396 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3397 int work_done, int budget)
3399 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3400 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3402 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3403 bnx2_tx_int(bp, bnapi, 0);
3405 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3406 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3411 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3413 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3414 struct bnx2 *bp = bnapi->bp;
3416 struct status_block_msix *sblk = bnapi->status_blk.msix;
3419 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3420 if (unlikely(work_done >= budget))
3423 bnapi->last_status_idx = sblk->status_idx;
3424 /* status idx must be read before checking for more work. */
3426 if (likely(!bnx2_has_fast_work(bnapi))) {
3428 napi_complete(napi);
3429 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3430 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3431 bnapi->last_status_idx);
3438 static int bnx2_poll(struct napi_struct *napi, int budget)
3440 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3441 struct bnx2 *bp = bnapi->bp;
3443 struct status_block *sblk = bnapi->status_blk.msi;
3446 bnx2_poll_link(bp, bnapi);
3448 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3451 bnx2_poll_cnic(bp, bnapi);
3454 /* bnapi->last_status_idx is used below to tell the hw how
3455 * much work has been processed, so we must read it before
3456 * checking for more work.
3458 bnapi->last_status_idx = sblk->status_idx;
3460 if (unlikely(work_done >= budget))
3464 if (likely(!bnx2_has_work(bnapi))) {
3465 napi_complete(napi);
3466 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3467 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3468 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3469 bnapi->last_status_idx);
3472 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3473 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3474 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3475 bnapi->last_status_idx);
3477 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3478 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3479 bnapi->last_status_idx);
3487 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3488 * from set_multicast.
3491 bnx2_set_rx_mode(struct net_device *dev)
3493 struct bnx2 *bp = netdev_priv(dev);
3494 u32 rx_mode, sort_mode;
3495 struct dev_addr_list *uc_ptr;
3498 if (!netif_running(dev))
3501 spin_lock_bh(&bp->phy_lock);
3503 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3504 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3505 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3507 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3508 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3510 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3511 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3513 if (dev->flags & IFF_PROMISC) {
3514 /* Promiscuous mode. */
3515 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3516 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3517 BNX2_RPM_SORT_USER0_PROM_VLAN;
3519 else if (dev->flags & IFF_ALLMULTI) {
3520 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3521 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3524 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3527 /* Accept one or more multicast(s). */
3528 struct dev_mc_list *mclist;
3529 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3534 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3536 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3537 i++, mclist = mclist->next) {
3539 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3541 regidx = (bit & 0xe0) >> 5;
3543 mc_filter[regidx] |= (1 << bit);
3546 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3547 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3551 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3555 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
3556 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3557 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3558 BNX2_RPM_SORT_USER0_PROM_VLAN;
3559 } else if (!(dev->flags & IFF_PROMISC)) {
3560 uc_ptr = dev->uc_list;
3562 /* Add all entries to the match filter list */
3563 for (i = 0; i < dev->uc_count; i++) {
3564 bnx2_set_mac_addr(bp, uc_ptr->da_addr,
3565 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3567 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3568 uc_ptr = uc_ptr->next;
3573 if (rx_mode != bp->rx_mode) {
3574 bp->rx_mode = rx_mode;
3575 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3578 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3579 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3580 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3582 spin_unlock_bh(&bp->phy_lock);
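/* Sketch of the multicast hash placement used above.  ether_crc_le()
 * yields a 32-bit CRC; an 8-bit bucket derived from it is split so the
 * top three bits pick one of the eight hash registers and the low five
 * bits pick a bit within that register (8 x 32 = 256 buckets).  The
 * bucket derivation shown here (~crc & 0xff) is an assumption -- the
 * corresponding lines are elided in this listing -- and the helper name
 * is illustrative only.
 */
static inline void bnx2_mc_hash_bucket(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bucket = ~crc & 0xff;	/* assumed 8-bit bucket */

	*regidx = (bucket & 0xe0) >> 5;	/* hash register, 0..7 */
	*bitpos = bucket & 0x1f;	/* bit within that register */
}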
3585 static int __devinit
3586 check_fw_section(const struct firmware *fw,
3587 const struct bnx2_fw_file_section *section,
3588 u32 alignment, bool non_empty)
3590 u32 offset = be32_to_cpu(section->offset);
3591 u32 len = be32_to_cpu(section->len);
3593 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3595 if ((non_empty && len == 0) || len > fw->size - offset ||
3596 len & (alignment - 1))
3601 static int __devinit
3602 check_mips_fw_entry(const struct firmware *fw,
3603 const struct bnx2_mips_fw_file_entry *entry)
3605 if (check_fw_section(fw, &entry->text, 4, true) ||
3606 check_fw_section(fw, &entry->data, 4, false) ||
3607 check_fw_section(fw, &entry->rodata, 4, false))
3612 static int __devinit
3613 bnx2_request_firmware(struct bnx2 *bp)
3615 const char *mips_fw_file, *rv2p_fw_file;
3616 const struct bnx2_mips_fw_file *mips_fw;
3617 const struct bnx2_rv2p_fw_file *rv2p_fw;
3620 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3621 mips_fw_file = FW_MIPS_FILE_09;
3622 rv2p_fw_file = FW_RV2P_FILE_09;
3624 mips_fw_file = FW_MIPS_FILE_06;
3625 rv2p_fw_file = FW_RV2P_FILE_06;
3628 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3630 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3635 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3637 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3641 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3642 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3643 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3644 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3645 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3646 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3647 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3648 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3649 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3653 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3654 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3655 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3656 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3665 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3668 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3669 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3670 rv2p_code |= RV2P_BD_PAGE_SIZE;
3677 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3678 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3680 u32 rv2p_code_len, file_offset;
3685 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3686 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3688 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3690 if (rv2p_proc == RV2P_PROC1) {
3691 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3692 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3694 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3695 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3698 for (i = 0; i < rv2p_code_len; i += 8) {
3699 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3701 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3704 val = (i / 8) | cmd;
3705 REG_WR(bp, addr, val);
3708 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3709 for (i = 0; i < 8; i++) {
3712 loc = be32_to_cpu(fw_entry->fixup[i]);
3713 if (loc && ((loc * 4) < rv2p_code_len)) {
3714 code = be32_to_cpu(*(rv2p_code + loc - 1));
3715 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3716 code = be32_to_cpu(*(rv2p_code + loc));
3717 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3718 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3720 val = (loc / 2) | cmd;
3721 REG_WR(bp, addr, val);
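	/* Each fixup entry names an instruction slot whose operand must
	 * be patched at load time -- here the BD page size, which depends
	 * on the host PAGE_SIZE -- so one firmware image serves all page
	 * sizes.
	 */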
3725 /* Reset the processor; the un-stall is done later. */
3726 if (rv2p_proc == RV2P_PROC1) {
3727 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3730 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3737 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3738 const struct bnx2_mips_fw_file_entry *fw_entry)
3740 u32 addr, len, file_offset;
3746 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3747 val |= cpu_reg->mode_value_halt;
3748 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3749 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3751 /* Load the Text area. */
3752 addr = be32_to_cpu(fw_entry->text.addr);
3753 len = be32_to_cpu(fw_entry->text.len);
3754 file_offset = be32_to_cpu(fw_entry->text.offset);
3755 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3757 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3761 for (j = 0; j < (len / 4); j++, offset += 4)
3762 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3765 /* Load the Data area. */
3766 addr = be32_to_cpu(fw_entry->data.addr);
3767 len = be32_to_cpu(fw_entry->data.len);
3768 file_offset = be32_to_cpu(fw_entry->data.offset);
3769 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3771 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3775 for (j = 0; j < (len / 4); j++, offset += 4)
3776 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3779 /* Load the Read-Only area. */
3780 addr = be32_to_cpu(fw_entry->rodata.addr);
3781 len = be32_to_cpu(fw_entry->rodata.len);
3782 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3783 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3785 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3789 for (j = 0; j < (len / 4); j++, offset += 4)
3790 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3793 /* Clear the pre-fetch instruction. */
3794 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3796 val = be32_to_cpu(fw_entry->start_addr);
3797 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3799 /* Start the CPU. */
3800 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3801 val &= ~cpu_reg->mode_value_halt;
3802 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3803 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
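	/* The load sequence is symmetric: halt the CPU, stream the text,
	 * data and read-only sections into its scratchpad through the
	 * indirect register window, point the PC at the firmware entry
	 * address, then clear the halt bit to let it run.
	 */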
3809 bnx2_init_cpus(struct bnx2 *bp)
3811 const struct bnx2_mips_fw_file *mips_fw =
3812 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3813 const struct bnx2_rv2p_fw_file *rv2p_fw =
3814 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3817 /* Initialize the RV2P processor. */
3818 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3819 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3821 /* Initialize the RX Processor. */
3822 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3826 /* Initialize the TX Processor. */
3827 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3831 /* Initialize the TX Patch-up Processor. */
3832 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3836 /* Initialize the Completion Processor. */
3837 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3841 /* Initialize the Command Processor. */
3842 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3849 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3853 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3859 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3860 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3861 PCI_PM_CTRL_PME_STATUS);
3863 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3864 /* delay required during transition out of D3hot */
3867 val = REG_RD(bp, BNX2_EMAC_MODE);
3868 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3869 val &= ~BNX2_EMAC_MODE_MPKT;
3870 REG_WR(bp, BNX2_EMAC_MODE, val);
3872 val = REG_RD(bp, BNX2_RPM_CONFIG);
3873 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3874 REG_WR(bp, BNX2_RPM_CONFIG, val);
3885 autoneg = bp->autoneg;
3886 advertising = bp->advertising;
3888 if (bp->phy_port == PORT_TP) {
3889 bp->autoneg = AUTONEG_SPEED;
3890 bp->advertising = ADVERTISED_10baseT_Half |
3891 ADVERTISED_10baseT_Full |
3892 ADVERTISED_100baseT_Half |
3893 ADVERTISED_100baseT_Full |
3897 spin_lock_bh(&bp->phy_lock);
3898 bnx2_setup_phy(bp, bp->phy_port);
3899 spin_unlock_bh(&bp->phy_lock);
3901 bp->autoneg = autoneg;
3902 bp->advertising = advertising;
3904 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3906 val = REG_RD(bp, BNX2_EMAC_MODE);
3908 /* Enable port mode. */
3909 val &= ~BNX2_EMAC_MODE_PORT;
3910 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3911 BNX2_EMAC_MODE_ACPI_RCVD |
3912 BNX2_EMAC_MODE_MPKT;
3913 if (bp->phy_port == PORT_TP)
3914 val |= BNX2_EMAC_MODE_PORT_MII;
3916 val |= BNX2_EMAC_MODE_PORT_GMII;
3917 if (bp->line_speed == SPEED_2500)
3918 val |= BNX2_EMAC_MODE_25G_MODE;
3921 REG_WR(bp, BNX2_EMAC_MODE, val);
3923 /* receive all multicast */
3924 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3925 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3928 REG_WR(bp, BNX2_EMAC_RX_MODE,
3929 BNX2_EMAC_RX_MODE_SORT_MODE);
3931 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3932 BNX2_RPM_SORT_USER0_MC_EN;
3933 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3934 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3935 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3936 BNX2_RPM_SORT_USER0_ENA);
3938 /* Need to enable EMAC and RPM for WOL. */
3939 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3940 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3941 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3942 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3944 val = REG_RD(bp, BNX2_RPM_CONFIG);
3945 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3946 REG_WR(bp, BNX2_RPM_CONFIG, val);
3948 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3951 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3954 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3955 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3958 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3959 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3960 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3969 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3971 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3974 /* No more memory access after this point until
3975 * device is brought back to D0.
3987 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3992 /* Request access to the flash interface. */
3993 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3994 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3995 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3996 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4002 if (j >= NVRAM_TIMEOUT_COUNT)
4009 bnx2_release_nvram_lock(struct bnx2 *bp)
4014 /* Relinquish nvram interface. */
4015 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4017 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4018 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4019 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4025 if (j >= NVRAM_TIMEOUT_COUNT)
4033 bnx2_enable_nvram_write(struct bnx2 *bp)
4037 val = REG_RD(bp, BNX2_MISC_CFG);
4038 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4040 if (bp->flash_info->flags & BNX2_NV_WREN) {
4043 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4044 REG_WR(bp, BNX2_NVM_COMMAND,
4045 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4047 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4050 val = REG_RD(bp, BNX2_NVM_COMMAND);
4051 if (val & BNX2_NVM_COMMAND_DONE)
4055 if (j >= NVRAM_TIMEOUT_COUNT)
4062 bnx2_disable_nvram_write(struct bnx2 *bp)
4066 val = REG_RD(bp, BNX2_MISC_CFG);
4067 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4072 bnx2_enable_nvram_access(struct bnx2 *bp)
4076 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4077 /* Enable both bits, even on read. */
4078 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4079 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4083 bnx2_disable_nvram_access(struct bnx2 *bp)
4087 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4088 /* Disable both bits, even after read. */
4089 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4090 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4091 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4095 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4100 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4101 /* Buffered flash, no erase needed */
4104 /* Build an erase command */
4105 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4106 BNX2_NVM_COMMAND_DOIT;
4108 /* Need to clear DONE bit separately. */
4109 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4111 /* Address of the NVRAM page to erase. */
4112 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4114 /* Issue an erase command. */
4115 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4117 /* Wait for completion. */
4118 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4123 val = REG_RD(bp, BNX2_NVM_COMMAND);
4124 if (val & BNX2_NVM_COMMAND_DONE)
4128 if (j >= NVRAM_TIMEOUT_COUNT)
4135 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4140 /* Build the command word. */
4141 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4143 /* Calculate the offset within a buffered flash (not needed for 5709). */
4144 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4145 offset = ((offset / bp->flash_info->page_size) <<
4146 bp->flash_info->page_bits) +
4147 (offset % bp->flash_info->page_size);
4150 /* Need to clear DONE bit separately. */
4151 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4153 /* Address of the NVRAM to read from. */
4154 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4156 /* Issue a read command. */
4157 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4159 /* Wait for completion. */
4160 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4165 val = REG_RD(bp, BNX2_NVM_COMMAND);
4166 if (val & BNX2_NVM_COMMAND_DONE) {
4167 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4168 memcpy(ret_val, &v, 4);
4172 if (j >= NVRAM_TIMEOUT_COUNT)
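/* Worked example of the buffered-flash translation above, assuming a
 * hypothetical geometry of 264-byte pages and 9 page-address bits:
 * byte offset 600 is page 2, byte 72, and becomes (2 << 9) + 72 = 0x448.
 * The part wants "page number in the high bits, byte-in-page in the low
 * bits" rather than a flat byte address.  Illustrative helper only.
 */
static inline u32 bnx2_buffered_offset(u32 offset)
{
	u32 page_size = 264, page_bits = 9;	/* hypothetical geometry */

	return ((offset / page_size) << page_bits) + (offset % page_size);
}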
4180 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4186 /* Build the command word. */
4187 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4189 /* Calculate the offset within a buffered flash (not needed for 5709). */
4190 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4191 offset = ((offset / bp->flash_info->page_size) <<
4192 bp->flash_info->page_bits) +
4193 (offset % bp->flash_info->page_size);
4196 /* Need to clear DONE bit separately. */
4197 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4199 memcpy(&val32, val, 4);
4201 /* Write the data. */
4202 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4204 /* Address of the NVRAM to write to. */
4205 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4207 /* Issue the write command. */
4208 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4210 /* Wait for completion. */
4211 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4214 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4217 if (j >= NVRAM_TIMEOUT_COUNT)
4224 bnx2_init_nvram(struct bnx2 *bp)
4227 int j, entry_count, rc = 0;
4228 struct flash_spec *flash;
4230 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4231 bp->flash_info = &flash_5709;
4232 goto get_flash_size;
4235 /* Determine the selected interface. */
4236 val = REG_RD(bp, BNX2_NVM_CFG1);
4238 entry_count = ARRAY_SIZE(flash_table);
4240 if (val & 0x40000000) {
4242 /* Flash interface has been reconfigured */
4243 for (j = 0, flash = &flash_table[0]; j < entry_count;
4245 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4246 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4247 bp->flash_info = flash;
4254 /* The flash interface has not yet been reconfigured */
4256 if (val & (1 << 23))
4257 mask = FLASH_BACKUP_STRAP_MASK;
4259 mask = FLASH_STRAP_MASK;
4261 for (j = 0, flash = &flash_table[0]; j < entry_count;
4264 if ((val & mask) == (flash->strapping & mask)) {
4265 bp->flash_info = flash;
4267 /* Request access to the flash interface. */
4268 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4271 /* Enable access to flash interface */
4272 bnx2_enable_nvram_access(bp);
4274 /* Reconfigure the flash interface */
4275 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4276 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4277 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4278 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4280 /* Disable access to flash interface */
4281 bnx2_disable_nvram_access(bp);
4282 bnx2_release_nvram_lock(bp);
4287 } /* if (val & 0x40000000) */
4289 if (j == entry_count) {
4290 bp->flash_info = NULL;
4291 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
4296 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4297 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4299 bp->flash_size = val;
4301 bp->flash_size = bp->flash_info->total_size;
4307 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4311 u32 cmd_flags, offset32, len32, extra;
4316 /* Request access to the flash interface. */
4317 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4320 /* Enable access to flash interface */
4321 bnx2_enable_nvram_access(bp);
4334 pre_len = 4 - (offset & 3);
4336 if (pre_len >= len32) {
4338 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4339 BNX2_NVM_COMMAND_LAST;
4342 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4345 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4350 memcpy(ret_buf, buf + (offset & 3), pre_len);
4357 extra = 4 - (len32 & 3);
4358 len32 = (len32 + 4) & ~3;
4365 cmd_flags = BNX2_NVM_COMMAND_LAST;
4367 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4368 BNX2_NVM_COMMAND_LAST;
4370 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4372 memcpy(ret_buf, buf, 4 - extra);
4374 else if (len32 > 0) {
4377 /* Read the first word. */
4381 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4383 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4385 /* Advance to the next dword. */
4390 while (len32 > 4 && rc == 0) {
4391 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4393 /* Advance to the next dword. */
4402 cmd_flags = BNX2_NVM_COMMAND_LAST;
4403 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4405 memcpy(ret_buf, buf, 4 - extra);
4408 /* Disable access to flash interface */
4409 bnx2_disable_nvram_access(bp);
4411 bnx2_release_nvram_lock(bp);
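/* Simplified sketch of the alignment decomposition above, e.g. for
 * offset 5, length 10: the head dword at offset 4 contributes
 * pre_len = 4 - (5 & 3) = 3 bytes; the remaining 7 bytes round up to
 * two more dword reads, and extra = 4 - (7 & 3) = 1 trailing byte of
 * the last dword is discarded.  NVRAM is dword-addressable only, so
 * every byte-granular request is decomposed this way.  The helper name
 * is illustrative only.
 */
static inline void bnx2_nvram_span(u32 offset, u32 len,
				   u32 *pre_len, u32 *extra)
{
	*pre_len = (offset & 3) ? 4 - (offset & 3) : 0;	/* head bytes */
	if (*pre_len > len)
		*pre_len = len;
	len -= *pre_len;
	*extra = (len & 3) ? 4 - (len & 3) : 0;		/* discarded tail */
}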
4417 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4420 u32 written, offset32, len32;
4421 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4423 int align_start, align_end;
4428 align_start = align_end = 0;
4430 if ((align_start = (offset32 & 3))) {
4432 len32 += align_start;
4435 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4440 align_end = 4 - (len32 & 3);
4442 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4446 if (align_start || align_end) {
4447 align_buf = kmalloc(len32, GFP_KERNEL);
4448 if (align_buf == NULL)
4451 memcpy(align_buf, start, 4);
4454 memcpy(align_buf + len32 - 4, end, 4);
4456 memcpy(align_buf + align_start, data_buf, buf_size);
4460 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4461 flash_buffer = kmalloc(264, GFP_KERNEL);
4462 if (flash_buffer == NULL) {
4464 goto nvram_write_end;
4469 while ((written < len32) && (rc == 0)) {
4470 u32 page_start, page_end, data_start, data_end;
4471 u32 addr, cmd_flags;
4474 /* Find the page_start addr */
4475 page_start = offset32 + written;
4476 page_start -= (page_start % bp->flash_info->page_size);
4477 /* Find the page_end addr */
4478 page_end = page_start + bp->flash_info->page_size;
4479 /* Find the data_start addr */
4480 data_start = (written == 0) ? offset32 : page_start;
4481 /* Find the data_end addr */
4482 data_end = (page_end > offset32 + len32) ?
4483 (offset32 + len32) : page_end;
4485 /* Request access to the flash interface. */
4486 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4487 goto nvram_write_end;
4489 /* Enable access to flash interface */
4490 bnx2_enable_nvram_access(bp);
4492 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4493 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4496 /* Read the whole page into the buffer
4497 * (non-buffered flash only) */
4498 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4499 if (j == (bp->flash_info->page_size - 4)) {
4500 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4502 rc = bnx2_nvram_read_dword(bp,
4508 goto nvram_write_end;
4514 /* Enable writes to flash interface (unlock write-protect) */
4515 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4516 goto nvram_write_end;
4518 /* Loop to write back the buffer data from page_start to
4521 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4522 /* Erase the page */
4523 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4524 goto nvram_write_end;
4526 /* Re-enable writes for the actual write */
4527 bnx2_enable_nvram_write(bp);
4529 for (addr = page_start; addr < data_start;
4530 addr += 4, i += 4) {
4532 rc = bnx2_nvram_write_dword(bp, addr,
4533 &flash_buffer[i], cmd_flags);
4536 goto nvram_write_end;
4542 /* Loop to write the new data from data_start to data_end */
4543 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4544 if ((addr == page_end - 4) ||
4545 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4546 (addr == data_end - 4))) {
4548 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4550 rc = bnx2_nvram_write_dword(bp, addr, buf,
4554 goto nvram_write_end;
4560 /* Loop to write back the buffer data from data_end
4562 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4563 for (addr = data_end; addr < page_end;
4564 addr += 4, i += 4) {
4566 if (addr == page_end - 4) {
4567 cmd_flags = BNX2_NVM_COMMAND_LAST;
4569 rc = bnx2_nvram_write_dword(bp, addr,
4570 &flash_buffer[i], cmd_flags);
4573 goto nvram_write_end;
4579 /* Disable writes to flash interface (lock write-protect) */
4580 bnx2_disable_nvram_write(bp);
4582 /* Disable access to flash interface */
4583 bnx2_disable_nvram_access(bp);
4584 bnx2_release_nvram_lock(bp);
4586 /* Increment written */
4587 written += data_end - data_start;
4591 kfree(flash_buffer);
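/* Worked example of the loop bounds above, assuming a 256-byte page and
 * a 300-byte write at offset 200: pass 1 has page_start = 0,
 * data_start = 200, data_end = 256 (the preserved head of the page is
 * re-written from flash_buffer, then 56 new bytes); pass 2 has
 * page_start = 256, data_start = 256, data_end = 500, and the preserved
 * tail 500..512 is restored from flash_buffer afterwards.  Non-buffered
 * parts must be erased a page at a time, hence this
 * read-modify-erase-write cycle.
 */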
4597 bnx2_init_fw_cap(struct bnx2 *bp)
4601 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4602 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4604 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4605 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4607 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4608 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4611 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4612 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4613 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4616 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4617 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4620 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4622 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4623 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4624 bp->phy_port = PORT_FIBRE;
4626 bp->phy_port = PORT_TP;
4628 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4629 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4632 if (netif_running(bp->dev) && sig)
4633 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4637 bnx2_setup_msix_tbl(struct bnx2 *bp)
4639 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4641 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4642 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4646 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4652 /* Wait for the current PCI transaction to complete before
4653 * issuing a reset. */
4654 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4655 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4656 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4657 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4658 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4659 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4662 /* Wait for the firmware to tell us it is ok to issue a reset. */
4663 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4665 /* Deposit a driver reset signature so the firmware knows that
4666 * this is a soft reset. */
4667 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4668 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4670 /* Do a dummy read to force the chip to complete all outstanding transactions
4671 * before we issue a reset. */
4672 val = REG_RD(bp, BNX2_MISC_ID);
4674 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4675 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4676 REG_RD(bp, BNX2_MISC_COMMAND);
4679 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4680 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4682 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4685 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4686 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4687 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4690 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4692 /* Reading back any register after chip reset will hang the
4693 * bus on 5706 A0 and A1. The msleep below provides plenty
4694 * of margin for write posting.
4696 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4697 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4700 /* Reset takes approximately 30 usec */
4701 for (i = 0; i < 10; i++) {
4702 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4703 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4704 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4709 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4710 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4711 printk(KERN_ERR PFX "Chip reset did not complete\n");
4716 /* Make sure byte swapping is properly configured. */
4717 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4718 if (val != 0x01020304) {
4719 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4723 /* Wait for the firmware to finish its initialization. */
4724 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4728 spin_lock_bh(&bp->phy_lock);
4729 old_port = bp->phy_port;
4730 bnx2_init_fw_cap(bp);
4731 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4732 old_port != bp->phy_port)
4733 bnx2_set_default_remote_link(bp);
4734 spin_unlock_bh(&bp->phy_lock);
4736 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4737 /* Adjust the voltage regulator two steps lower. The default
4738 * of this register is 0x0000000e. */
4739 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4741 /* Remove bad rbuf memory from the free pool. */
4742 rc = bnx2_alloc_bad_rbuf(bp);
4745 if (bp->flags & BNX2_FLAG_USING_MSIX)
4746 bnx2_setup_msix_tbl(bp);
4752 bnx2_init_chip(struct bnx2 *bp)
4757 /* Make sure the interrupt is not active. */
4758 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4760 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4761 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4763 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4765 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4766 DMA_READ_CHANS << 12 |
4767 DMA_WRITE_CHANS << 16;
4769 val |= (0x2 << 20) | (1 << 11);
4771 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4774 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4775 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4776 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4778 REG_WR(bp, BNX2_DMA_CONFIG, val);
4780 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4781 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4782 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4783 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4786 if (bp->flags & BNX2_FLAG_PCIX) {
4789 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4791 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4792 val16 & ~PCI_X_CMD_ERO);
4795 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4796 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4797 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4798 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4800 /* Initialize context mapping and zero out the quick contexts. The
4801 * context block must have already been enabled. */
4802 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4803 rc = bnx2_init_5709_context(bp);
4807 bnx2_init_context(bp);
4809 if ((rc = bnx2_init_cpus(bp)) != 0)
4812 bnx2_init_nvram(bp);
4814 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4816 val = REG_RD(bp, BNX2_MQ_CONFIG);
4817 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4818 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4819 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4820 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4821 if (CHIP_REV(bp) == CHIP_REV_Ax)
4822 val |= BNX2_MQ_CONFIG_HALT_DIS;
4825 REG_WR(bp, BNX2_MQ_CONFIG, val);
4827 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4828 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4829 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4831 val = (BCM_PAGE_BITS - 8) << 24;
4832 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4834 /* Configure page size. */
4835 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4836 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4837 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4838 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4840 val = bp->mac_addr[0] +
4841 (bp->mac_addr[1] << 8) +
4842 (bp->mac_addr[2] << 16) +
4844 (bp->mac_addr[4] << 8) +
4845 (bp->mac_addr[5] << 16);
4846 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
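	/* Seeding the backoff generator from the station's own MAC octets
	 * gives each NIC on a shared segment a different half-duplex
	 * retransmission backoff sequence, instead of retrying in
	 * lockstep after simultaneous transmissions.
	 */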
4848 /* Program the MTU. Also include 4 bytes for CRC32. */
4850 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4851 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4852 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4853 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4858 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4859 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4860 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4862 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4863 bp->bnx2_napi[i].last_status_idx = 0;
4865 bp->idle_chk_status_idx = 0xffff;
4867 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4869 /* Set up how to generate a link change interrupt. */
4870 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4872 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4873 (u64) bp->status_blk_mapping & 0xffffffff);
4874 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4876 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4877 (u64) bp->stats_blk_mapping & 0xffffffff);
4878 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4879 (u64) bp->stats_blk_mapping >> 32);
4881 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4882 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4884 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4885 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4887 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4888 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4890 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4892 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4894 REG_WR(bp, BNX2_HC_COM_TICKS,
4895 (bp->com_ticks_int << 16) | bp->com_ticks);
4897 REG_WR(bp, BNX2_HC_CMD_TICKS,
4898 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4900 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4901 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4903 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4904 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4906 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4907 val = BNX2_HC_CONFIG_COLLECT_STATS;
4909 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4910 BNX2_HC_CONFIG_COLLECT_STATS;
4913 if (bp->irq_nvecs > 1) {
4914 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4915 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4917 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4920 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4921 val |= BNX2_HC_CONFIG_ONE_SHOT;
4923 REG_WR(bp, BNX2_HC_CONFIG, val);
4925 for (i = 1; i < bp->irq_nvecs; i++) {
4926 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4927 BNX2_HC_SB_CONFIG_1;
4930 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4931 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4932 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4934 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4935 (bp->tx_quick_cons_trip_int << 16) |
4936 bp->tx_quick_cons_trip);
4938 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4939 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4941 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4942 (bp->rx_quick_cons_trip_int << 16) |
4943 bp->rx_quick_cons_trip);
4945 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4946 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4949 /* Clear internal stats counters. */
4950 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4952 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4954 /* Initialize the receive filter. */
4955 bnx2_set_rx_mode(bp->dev);
4957 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4958 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4959 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4960 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4962 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4965 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4966 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4968 udelay(20);
4970 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4972 return rc;
4973 }
4975 static void
4976 bnx2_clear_ring_states(struct bnx2 *bp)
4977 {
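/* Reset the driver's software view of every tx and rx ring; called
 * before the rings are (re)programmed into the hardware.
 */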
4978 struct bnx2_napi *bnapi;
4979 struct bnx2_tx_ring_info *txr;
4980 struct bnx2_rx_ring_info *rxr;
4981 int i;
4983 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4984 bnapi = &bp->bnx2_napi[i];
4985 txr = &bnapi->tx_ring;
4986 rxr = &bnapi->rx_ring;
4988 txr->tx_cons = 0;
4989 txr->hw_tx_cons = 0;
4990 rxr->rx_prod_bseq = 0;
4991 rxr->rx_prod = 0;
4992 rxr->rx_cons = 0;
4993 rxr->rx_pg_prod = 0;
4994 rxr->rx_pg_cons = 0;
4995 }
4996 }
4998 static void
4999 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5000 {
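/* Program the L2 tx context for this ring: ring type/size plus the
 * 64-bit host address of the tx descriptor ring (the 5709 uses the
 * XI context offsets).
 */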
5001 u32 val, offset0, offset1, offset2, offset3;
5002 u32 cid_addr = GET_CID_ADDR(cid);
5004 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5005 offset0 = BNX2_L2CTX_TYPE_XI;
5006 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5007 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5008 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5009 } else {
5010 offset0 = BNX2_L2CTX_TYPE;
5011 offset1 = BNX2_L2CTX_CMD_TYPE;
5012 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5013 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5014 }
5015 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5016 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5018 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5019 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5021 val = (u64) txr->tx_desc_mapping >> 32;
5022 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5024 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5025 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5026 }
5028 static void
5029 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5030 {
5031 struct tx_bd *txbd;
5032 u32 cid = TX_CID;
5033 struct bnx2_napi *bnapi;
5034 struct bnx2_tx_ring_info *txr;
5036 bnapi = &bp->bnx2_napi[ring_num];
5037 txr = &bnapi->tx_ring;
5039 if (ring_num == 0)
5040 cid = TX_CID;
5041 else
5042 cid = TX_TSS_CID + ring_num - 1;
5044 bp->tx_wake_thresh = bp->tx_ring_size / 2;
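/* The extra BD at the end of the ring is a chain entry pointing back
 * to the start, making the descriptor ring circular.
 */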
5046 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5048 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5049 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5051 txr->tx_prod = 0;
5052 txr->tx_prod_bseq = 0;
5054 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5055 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5057 bnx2_init_tx_context(bp, cid, txr);
5061 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5062 int num_rings)
5063 {
5064 int i;
5065 struct rx_bd *rxbd;
5067 for (i = 0; i < num_rings; i++) {
5068 int j;
5070 rxbd = &rx_ring[i][0];
5071 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5072 rxbd->rx_bd_len = buf_size;
5073 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5074 }
5075 if (i == (num_rings - 1))
5076 j = 0;
5077 else
5078 j = i + 1;
5079 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5080 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5081 }
5082 }
5084 static void
5085 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5086 {
5087 int i;
5088 u16 prod, ring_prod;
5089 u32 cid, rx_cid_addr, val;
5090 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5091 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5093 if (ring_num == 0)
5094 cid = RX_CID;
5095 else
5096 cid = RX_RSS_CID + ring_num - 1;
5098 rx_cid_addr = GET_CID_ADDR(cid);
5100 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5101 bp->rx_buf_use_size, bp->rx_max_ring);
5103 bnx2_init_rx_context(bp, cid);
5105 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5106 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5107 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5108 }
5110 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5111 if (bp->rx_pg_ring_size) {
5112 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5113 rxr->rx_pg_desc_mapping,
5114 PAGE_SIZE, bp->rx_max_pg_ring);
5115 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5116 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5117 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5118 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5120 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5121 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5123 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5124 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5126 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5127 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5128 }
5130 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5131 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5133 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5134 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5136 ring_prod = prod = rxr->rx_pg_prod;
5137 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5138 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
5139 break;
5140 prod = NEXT_RX_BD(prod);
5141 ring_prod = RX_PG_RING_IDX(prod);
5142 }
5143 rxr->rx_pg_prod = prod;
5145 ring_prod = prod = rxr->rx_prod;
5146 for (i = 0; i < bp->rx_ring_size; i++) {
5147 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
5148 break;
5149 prod = NEXT_RX_BD(prod);
5150 ring_prod = RX_RING_IDX(prod);
5151 }
5152 rxr->rx_prod = prod;
5154 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5155 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5156 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5158 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5159 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5161 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5162 }
5164 static void
5165 bnx2_init_all_rings(struct bnx2 *bp)
5166 {
5167 int i;
5168 u32 val;
5170 bnx2_clear_ring_states(bp);
5172 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5173 for (i = 0; i < bp->num_tx_rings; i++)
5174 bnx2_init_tx_ring(bp, i);
5176 if (bp->num_tx_rings > 1)
5177 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5178 (TX_TSS_CID << 7));
5180 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5181 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5183 for (i = 0; i < bp->num_rx_rings; i++)
5184 bnx2_init_rx_ring(bp, i);
5186 if (bp->num_rx_rings > 1) {
5187 u32 tbl_32;
5188 u8 *tbl = (u8 *) &tbl_32;
5190 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5191 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5193 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5194 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5195 if ((i % 4) == 3)
5196 bnx2_reg_wr_ind(bp,
5197 BNX2_RXP_SCRATCH_RSS_TBL + i,
5198 cpu_to_be32(tbl_32));
5199 }
5201 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5202 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5204 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5205 }
5206 }
5209 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5210 {
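/* Work out how many ring pages are needed to hold ring_size BDs,
 * rounded up to a power of 2 and capped at max_size.
 */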
5211 u32 max, num_rings = 1;
5213 while (ring_size > MAX_RX_DESC_CNT) {
5214 ring_size -= MAX_RX_DESC_CNT;
5215 num_rings++;
5216 }
5217 /* round to next power of 2 */
5218 max = max_size;
5219 while ((max & num_rings) == 0)
5220 max >>= 1;
5222 if (num_rings != max)
5223 max <<= 1;
5225 return max;
5226 }
5228 static void
5229 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5230 {
5231 u32 rx_size, rx_space, jumbo_size;
5233 /* 8 for CRC and VLAN */
5234 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5236 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5237 sizeof(struct skb_shared_info);
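/* rx_space is the true footprint of one rx buffer, including the
 * alignment pad and the skb_shared_info block at the end of the data.
 */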
5239 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5240 bp->rx_pg_ring_size = 0;
5241 bp->rx_max_pg_ring = 0;
5242 bp->rx_max_pg_ring_idx = 0;
5243 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5244 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5246 jumbo_size = size * pages;
5247 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5248 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5250 bp->rx_pg_ring_size = jumbo_size;
5251 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5252 MAX_RX_PG_RINGS);
5253 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5254 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5255 bp->rx_copy_thresh = 0;
5256 }
5258 bp->rx_buf_use_size = rx_size;
5259 /* hw alignment */
5260 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5261 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5262 bp->rx_ring_size = size;
5263 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5264 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5265 }
5267 static void
5268 bnx2_free_tx_skbs(struct bnx2 *bp)
5269 {
5270 int i;
5272 for (i = 0; i < bp->num_tx_rings; i++) {
5273 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5274 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5275 int j;
5277 if (txr->tx_buf_ring == NULL)
5278 continue;
5280 for (j = 0; j < TX_DESC_CNT; ) {
5281 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5282 struct sk_buff *skb = tx_buf->skb;
5284 if (skb == NULL) {
5285 j++;
5286 continue;
5287 }
5289 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5291 tx_buf->skb = NULL;
5293 j += skb_shinfo(skb)->nr_frags + 1;
5294 dev_kfree_skb(skb);
5295 }
5296 }
5297 }
5299 static void
5300 bnx2_free_rx_skbs(struct bnx2 *bp)
5301 {
5302 int i;
5304 for (i = 0; i < bp->num_rx_rings; i++) {
5305 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5306 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5307 int j;
5309 if (rxr->rx_buf_ring == NULL)
5310 return;
5312 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5313 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5314 struct sk_buff *skb = rx_buf->skb;
5316 if (skb == NULL)
5317 continue;
5319 pci_unmap_single(bp->pdev,
5320 pci_unmap_addr(rx_buf, mapping),
5321 bp->rx_buf_use_size,
5322 PCI_DMA_FROMDEVICE);
5324 rx_buf->skb = NULL;
5326 dev_kfree_skb(skb);
5327 }
5328 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5329 bnx2_free_rx_page(bp, rxr, j);
5330 }
5331 }
5333 static void
5334 bnx2_free_skbs(struct bnx2 *bp)
5335 {
5336 bnx2_free_tx_skbs(bp);
5337 bnx2_free_rx_skbs(bp);
5338 }
5340 static int
5341 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5342 {
5343 int rc;
5345 rc = bnx2_reset_chip(bp, reset_code);
5346 bnx2_free_skbs(bp);
5347 if (rc)
5348 return rc;
5350 if ((rc = bnx2_init_chip(bp)) != 0)
5351 return rc;
5353 bnx2_init_all_rings(bp);
5354 return 0;
5355 }
5357 static int
5358 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5359 {
5360 int rc;
5362 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5363 return rc;
5365 spin_lock_bh(&bp->phy_lock);
5366 bnx2_init_phy(bp, reset_phy);
5367 bnx2_set_link(bp);
5368 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5369 bnx2_remote_phy_event(bp);
5370 spin_unlock_bh(&bp->phy_lock);
5371 return 0;
5372 }
5374 static int
5375 bnx2_shutdown_chip(struct bnx2 *bp)
5376 {
5377 u32 reset_code;
5379 if (bp->flags & BNX2_FLAG_NO_WOL)
5380 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5381 else if (bp->wol)
5382 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5383 else
5384 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5386 return bnx2_reset_chip(bp, reset_code);
5387 }
5389 static int
5390 bnx2_test_registers(struct bnx2 *bp)
5391 {
5392 int ret;
5393 int i, is_5709;
5394 static const struct {
5395 u16 offset;
5396 u16 flags;
5397 #define BNX2_FL_NOT_5709 1
5398 u32 rw_mask;
5399 u32 ro_mask;
5400 } reg_tbl[] = {
5401 { 0x006c, 0, 0x00000000, 0x0000003f },
5402 { 0x0090, 0, 0xffffffff, 0x00000000 },
5403 { 0x0094, 0, 0x00000000, 0x00000000 },
5405 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5406 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5407 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5408 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5409 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5410 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5411 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5412 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5413 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5415 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5416 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5417 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5418 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5419 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5420 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5422 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5423 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5424 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5426 { 0x1000, 0, 0x00000000, 0x00000001 },
5427 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5429 { 0x1408, 0, 0x01c00800, 0x00000000 },
5430 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5431 { 0x14a8, 0, 0x00000000, 0x000001ff },
5432 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5433 { 0x14b0, 0, 0x00000002, 0x00000001 },
5434 { 0x14b8, 0, 0x00000000, 0x00000000 },
5435 { 0x14c0, 0, 0x00000000, 0x00000009 },
5436 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5437 { 0x14cc, 0, 0x00000000, 0x00000001 },
5438 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5440 { 0x1800, 0, 0x00000000, 0x00000001 },
5441 { 0x1804, 0, 0x00000000, 0x00000003 },
5443 { 0x2800, 0, 0x00000000, 0x00000001 },
5444 { 0x2804, 0, 0x00000000, 0x00003f01 },
5445 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5446 { 0x2810, 0, 0xffff0000, 0x00000000 },
5447 { 0x2814, 0, 0xffff0000, 0x00000000 },
5448 { 0x2818, 0, 0xffff0000, 0x00000000 },
5449 { 0x281c, 0, 0xffff0000, 0x00000000 },
5450 { 0x2834, 0, 0xffffffff, 0x00000000 },
5451 { 0x2840, 0, 0x00000000, 0xffffffff },
5452 { 0x2844, 0, 0x00000000, 0xffffffff },
5453 { 0x2848, 0, 0xffffffff, 0x00000000 },
5454 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5456 { 0x2c00, 0, 0x00000000, 0x00000011 },
5457 { 0x2c04, 0, 0x00000000, 0x00030007 },
5459 { 0x3c00, 0, 0x00000000, 0x00000001 },
5460 { 0x3c04, 0, 0x00000000, 0x00070000 },
5461 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5462 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5463 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5464 { 0x3c14, 0, 0x00000000, 0xffffffff },
5465 { 0x3c18, 0, 0x00000000, 0xffffffff },
5466 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5467 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5469 { 0x5004, 0, 0x00000000, 0x0000007f },
5470 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5472 { 0x5c00, 0, 0x00000000, 0x00000001 },
5473 { 0x5c04, 0, 0x00000000, 0x0003000f },
5474 { 0x5c08, 0, 0x00000003, 0x00000000 },
5475 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5476 { 0x5c10, 0, 0x00000000, 0xffffffff },
5477 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5478 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5479 { 0x5c88, 0, 0x00000000, 0x00077373 },
5480 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5482 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5483 { 0x680c, 0, 0xffffffff, 0x00000000 },
5484 { 0x6810, 0, 0xffffffff, 0x00000000 },
5485 { 0x6814, 0, 0xffffffff, 0x00000000 },
5486 { 0x6818, 0, 0xffffffff, 0x00000000 },
5487 { 0x681c, 0, 0xffffffff, 0x00000000 },
5488 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5489 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5490 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5491 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5492 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5493 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5494 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5495 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5496 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5497 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5498 { 0x684c, 0, 0xffffffff, 0x00000000 },
5499 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5500 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5501 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5502 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5503 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5504 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5506 { 0xffff, 0, 0x00000000, 0x00000000 },
5507 };
5509 ret = 0;
5510 is_5709 = 0;
5511 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5512 is_5709 = 1;
5514 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5515 u32 offset, rw_mask, ro_mask, save_val, val;
5516 u16 flags = reg_tbl[i].flags;
5518 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5519 continue;
5521 offset = (u32) reg_tbl[i].offset;
5522 rw_mask = reg_tbl[i].rw_mask;
5523 ro_mask = reg_tbl[i].ro_mask;
5525 save_val = readl(bp->regview + offset);
5527 writel(0, bp->regview + offset);
5529 val = readl(bp->regview + offset);
5530 if ((val & rw_mask) != 0) {
5531 goto reg_test_err;
5532 }
5534 if ((val & ro_mask) != (save_val & ro_mask)) {
5535 goto reg_test_err;
5536 }
5538 writel(0xffffffff, bp->regview + offset);
5540 val = readl(bp->regview + offset);
5541 if ((val & rw_mask) != rw_mask) {
5542 goto reg_test_err;
5543 }
5545 if ((val & ro_mask) != (save_val & ro_mask)) {
5546 goto reg_test_err;
5547 }
5549 writel(save_val, bp->regview + offset);
5550 continue;
5552 reg_test_err:
5553 writel(save_val, bp->regview + offset);
5554 ret = -ENODEV;
5555 break;
5556 }
5557 return ret;
5558 }
5560 static int
5561 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5562 {
5563 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5564 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5565 int i;
5567 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5568 u32 offset;
5570 for (offset = 0; offset < size; offset += 4) {
5572 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5574 if (bnx2_reg_rd_ind(bp, start + offset) !=
5575 test_pattern[i]) {
5576 return -ENODEV;
5577 }
5578 }
5579 }
5580 return 0;
5581 }
5583 static int
5584 bnx2_test_memory(struct bnx2 *bp)
5585 {
5586 int ret = 0;
5587 int i;
5588 static struct mem_entry {
5589 u32 offset;
5590 u32 len;
5591 } mem_tbl_5706[] = {
5592 { 0x60000, 0x4000 },
5593 { 0xa0000, 0x3000 },
5594 { 0xe0000, 0x4000 },
5595 { 0x120000, 0x4000 },
5596 { 0x1a0000, 0x4000 },
5597 { 0x160000, 0x4000 },
5598 { 0xffffffff, 0 },
5599 },
5600 mem_tbl_5709[] = {
5601 { 0x60000, 0x4000 },
5602 { 0xa0000, 0x3000 },
5603 { 0xe0000, 0x4000 },
5604 { 0x120000, 0x4000 },
5605 { 0x1a0000, 0x4000 },
5606 { 0xffffffff, 0 },
5607 };
5608 struct mem_entry *mem_tbl;
5610 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5611 mem_tbl = mem_tbl_5709;
5612 else
5613 mem_tbl = mem_tbl_5706;
5615 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5616 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5617 mem_tbl[i].len)) != 0) {
5618 return ret;
5619 }
5620 }
5622 return ret;
5623 }
5625 #define BNX2_MAC_LOOPBACK 0
5626 #define BNX2_PHY_LOOPBACK 1
5628 static int
5629 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5630 {
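/* Build one test frame addressed to ourselves, send it with MAC or PHY
 * loopback enabled, and verify that it arrives back intact on ring 0.
 */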
5631 unsigned int pkt_size, num_pkts, i;
5632 struct sk_buff *skb, *rx_skb;
5633 unsigned char *packet;
5634 u16 rx_start_idx, rx_idx;
5635 dma_addr_t map;
5636 struct tx_bd *txbd;
5637 struct sw_bd *rx_buf;
5638 struct l2_fhdr *rx_hdr;
5639 int ret = -ENODEV;
5640 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5641 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5642 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5644 tx_napi = bnapi;
5646 txr = &tx_napi->tx_ring;
5647 rxr = &bnapi->rx_ring;
5648 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5649 bp->loopback = MAC_LOOPBACK;
5650 bnx2_set_mac_loopback(bp);
5651 }
5652 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5653 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5654 return 0;
5656 bp->loopback = PHY_LOOPBACK;
5657 bnx2_set_phy_loopback(bp);
5658 }
5659 else
5660 return -EINVAL;
5662 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5663 skb = netdev_alloc_skb(bp->dev, pkt_size);
5664 if (!skb)
5665 return -ENOMEM;
5666 packet = skb_put(skb, pkt_size);
5667 memcpy(packet, bp->dev->dev_addr, 6);
5668 memset(packet + 6, 0x0, 8);
5669 for (i = 14; i < pkt_size; i++)
5670 packet[i] = (unsigned char) (i & 0xff);
5672 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5673 dev_kfree_skb(skb);
5674 return -EIO;
5675 }
5676 map = skb_shinfo(skb)->dma_maps[0];
5678 REG_WR(bp, BNX2_HC_COMMAND,
5679 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5681 REG_RD(bp, BNX2_HC_COMMAND);
5683 udelay(5);
5684 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5686 num_pkts = 0;
5688 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5690 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5691 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5692 txbd->tx_bd_mss_nbytes = pkt_size;
5693 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5695 num_pkts++;
5696 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5697 txr->tx_prod_bseq += pkt_size;
5699 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5700 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5702 udelay(100);
5704 REG_WR(bp, BNX2_HC_COMMAND,
5705 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5707 REG_RD(bp, BNX2_HC_COMMAND);
5709 udelay(5);
5711 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5712 dev_kfree_skb(skb);
5714 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5715 goto loopback_test_done;
5717 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5718 if (rx_idx != rx_start_idx + num_pkts) {
5719 goto loopback_test_done;
5720 }
5722 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5723 rx_skb = rx_buf->skb;
5725 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5726 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5728 pci_dma_sync_single_for_cpu(bp->pdev,
5729 pci_unmap_addr(rx_buf, mapping),
5730 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5732 if (rx_hdr->l2_fhdr_status &
5733 (L2_FHDR_ERRORS_BAD_CRC |
5734 L2_FHDR_ERRORS_PHY_DECODE |
5735 L2_FHDR_ERRORS_ALIGNMENT |
5736 L2_FHDR_ERRORS_TOO_SHORT |
5737 L2_FHDR_ERRORS_GIANT_FRAME)) {
5739 goto loopback_test_done;
5740 }
5742 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5743 goto loopback_test_done;
5744 }
5746 for (i = 14; i < pkt_size; i++) {
5747 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5748 goto loopback_test_done;
5749 }
5750 }
5752 ret = 0;
5754 loopback_test_done:
5755 bp->loopback = 0;
5756 return ret;
5757 }
5759 #define BNX2_MAC_LOOPBACK_FAILED 1
5760 #define BNX2_PHY_LOOPBACK_FAILED 2
5761 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5762 BNX2_PHY_LOOPBACK_FAILED)
5764 static int
5765 bnx2_test_loopback(struct bnx2 *bp)
5766 {
5767 int rc = 0;
5769 if (!netif_running(bp->dev))
5770 return BNX2_LOOPBACK_FAILED;
5772 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5773 spin_lock_bh(&bp->phy_lock);
5774 bnx2_init_phy(bp, 1);
5775 spin_unlock_bh(&bp->phy_lock);
5776 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5777 rc |= BNX2_MAC_LOOPBACK_FAILED;
5778 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5779 rc |= BNX2_PHY_LOOPBACK_FAILED;
5780 return rc;
5781 }
5783 #define NVRAM_SIZE 0x200
5784 #define CRC32_RESIDUAL 0xdebb20e3
5786 static int
5787 bnx2_test_nvram(struct bnx2 *bp)
5788 {
5789 __be32 buf[NVRAM_SIZE / 4];
5790 u8 *data = (u8 *) buf;
5791 int rc = 0;
5792 u32 magic, csum;
5794 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5795 goto test_nvram_done;
5797 magic = be32_to_cpu(buf[0]);
5798 if (magic != 0x669955aa) {
5799 rc = -ENODEV;
5800 goto test_nvram_done;
5801 }
5803 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5804 goto test_nvram_done;
5806 csum = ether_crc_le(0x100, data);
5807 if (csum != CRC32_RESIDUAL) {
5808 rc = -ENODEV;
5809 goto test_nvram_done;
5810 }
5812 csum = ether_crc_le(0x100, data + 0x100);
5813 if (csum != CRC32_RESIDUAL) {
5814 rc = -ENODEV;
5815 }
5817 test_nvram_done:
5818 return rc;
5819 }
5821 static int
5822 bnx2_test_link(struct bnx2 *bp)
5823 {
5824 u32 bmsr;
5826 if (!netif_running(bp->dev))
5827 return -ENODEV;
5829 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5830 if (bp->link_up)
5831 return 0;
5832 return -ENODEV;
5833 }
5834 spin_lock_bh(&bp->phy_lock);
5835 bnx2_enable_bmsr1(bp);
5836 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5837 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5838 bnx2_disable_bmsr1(bp);
5839 spin_unlock_bh(&bp->phy_lock);
5841 if (bmsr & BMSR_LSTATUS) {
5842 return 0;
5843 }
5844 return -ENODEV;
5845 }
5847 static int
5848 bnx2_test_intr(struct bnx2 *bp)
5849 {
5850 int i;
5851 u16 status_idx;
5853 if (!netif_running(bp->dev))
5854 return -ENODEV;
5856 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5858 /* This register is not touched during run-time. */
5859 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5860 REG_RD(bp, BNX2_HC_COMMAND);
5862 for (i = 0; i < 10; i++) {
5863 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5864 status_idx) {
5866 break;
5867 }
5869 msleep_interruptible(10);
5870 }
5871 if (i < 10)
5872 return 0;
5874 return -ENODEV;
5875 }
5877 /* Determining link for parallel detection. */
5878 static int
5879 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5880 {
5881 u32 mode_ctl, an_dbg, exp;
5883 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5884 return 0;
5886 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5887 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5889 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5890 return 0;
5892 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5893 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5894 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5896 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5897 return 0;
5899 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5900 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5901 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5903 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5904 return 0;
5906 return 1;
5907 }
5909 static void
5910 bnx2_5706_serdes_timer(struct bnx2 *bp)
5911 {
5912 int check_link = 1;
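/* Periodic 5706 SerDes state machine: fall back to a forced 1Gb link
 * (parallel detect) when the partner does not autonegotiate, and
 * return to autoneg once it does.
 */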
5914 spin_lock(&bp->phy_lock);
5915 if (bp->serdes_an_pending) {
5916 bp->serdes_an_pending--;
5917 check_link = 0;
5918 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5919 u32 bmcr;
5921 bp->current_interval = BNX2_TIMER_INTERVAL;
5923 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5925 if (bmcr & BMCR_ANENABLE) {
5926 if (bnx2_5706_serdes_has_link(bp)) {
5927 bmcr &= ~BMCR_ANENABLE;
5928 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5929 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5930 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5931 }
5932 }
5933 }
5934 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5935 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5936 u32 phy2;
5938 bnx2_write_phy(bp, 0x17, 0x0f01);
5939 bnx2_read_phy(bp, 0x15, &phy2);
5940 if (phy2 & 0x20) {
5941 u32 bmcr;
5943 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5944 bmcr |= BMCR_ANENABLE;
5945 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5947 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5948 }
5949 } else
5950 bp->current_interval = BNX2_TIMER_INTERVAL;
5952 if (check_link) {
5953 u32 val;
5955 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5956 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5957 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5959 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5960 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5961 bnx2_5706s_force_link_dn(bp, 1);
5962 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5963 } else
5964 bnx2_set_link(bp);
5965 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5966 bnx2_set_link(bp);
5967 }
5968 spin_unlock(&bp->phy_lock);
5969 }
5971 static void
5972 bnx2_5708_serdes_timer(struct bnx2 *bp)
5973 {
5974 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5975 return;
5977 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5978 bp->serdes_an_pending = 0;
5979 return;
5980 }
5982 spin_lock(&bp->phy_lock);
5983 if (bp->serdes_an_pending)
5984 bp->serdes_an_pending--;
5985 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5986 u32 bmcr;
5988 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5989 if (bmcr & BMCR_ANENABLE) {
5990 bnx2_enable_forced_2g5(bp);
5991 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
5992 } else {
5993 bnx2_disable_forced_2g5(bp);
5994 bp->serdes_an_pending = 2;
5995 bp->current_interval = BNX2_TIMER_INTERVAL;
5996 }
5998 } else
5999 bp->current_interval = BNX2_TIMER_INTERVAL;
6001 spin_unlock(&bp->phy_lock);
6002 }
6004 static void
6005 bnx2_timer(unsigned long data)
6006 {
6007 struct bnx2 *bp = (struct bnx2 *) data;
6009 if (!netif_running(bp->dev))
6010 return;
6012 if (atomic_read(&bp->intr_sem) != 0)
6013 goto bnx2_restart_timer;
6015 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6016 BNX2_FLAG_USING_MSI)
6017 bnx2_chk_missed_msi(bp);
6019 bnx2_send_heart_beat(bp);
6021 bp->stats_blk->stat_FwRxDrop =
6022 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6024 /* workaround occasional corrupted counters */
6025 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
6026 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6027 BNX2_HC_COMMAND_STATS_NOW);
6029 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6030 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6031 bnx2_5706_serdes_timer(bp);
6032 else
6033 bnx2_5708_serdes_timer(bp);
6034 }
6036 bnx2_restart_timer:
6037 mod_timer(&bp->timer, jiffies + bp->current_interval);
6038 }
6040 static int
6041 bnx2_request_irq(struct bnx2 *bp)
6042 {
6043 unsigned long flags;
6044 struct bnx2_irq *irq;
6045 int rc = 0, i;
6047 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6048 flags = 0;
6049 else
6050 flags = IRQF_SHARED;
6052 for (i = 0; i < bp->irq_nvecs; i++) {
6053 irq = &bp->irq_tbl[i];
6054 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6055 &bp->bnx2_napi[i]);
6056 if (rc)
6057 break;
6058 irq->requested = 1;
6059 }
6060 return rc;
6061 }
6063 static void
6064 bnx2_free_irq(struct bnx2 *bp)
6065 {
6066 struct bnx2_irq *irq;
6067 int i;
6069 for (i = 0; i < bp->irq_nvecs; i++) {
6070 irq = &bp->irq_tbl[i];
6071 if (irq->requested)
6072 free_irq(irq->vector, &bp->bnx2_napi[i]);
6073 irq->requested = 0;
6074 }
6075 if (bp->flags & BNX2_FLAG_USING_MSI)
6076 pci_disable_msi(bp->pdev);
6077 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6078 pci_disable_msix(bp->pdev);
6080 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6081 }
6083 static void
6084 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6085 {
6086 int i, rc;
6087 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6088 struct net_device *dev = bp->dev;
6089 const int len = sizeof(bp->irq_tbl[0].name);
6091 bnx2_setup_msix_tbl(bp);
6092 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6093 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6094 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6096 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6097 msix_ent[i].entry = i;
6098 msix_ent[i].vector = 0;
6099 }
6101 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6102 if (rc != 0)
6103 return;
6105 bp->irq_nvecs = msix_vecs;
6106 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6107 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6108 bp->irq_tbl[i].vector = msix_ent[i].vector;
6109 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6110 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6111 }
6112 }
6114 static void
6115 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6116 {
6117 int cpus = num_online_cpus();
6118 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6120 bp->irq_tbl[0].handler = bnx2_interrupt;
6121 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6122 bp->irq_nvecs = 1;
6123 bp->irq_tbl[0].vector = bp->pdev->irq;
6125 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6126 bnx2_enable_msix(bp, msix_vecs);
6128 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6129 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6130 if (pci_enable_msi(bp->pdev) == 0) {
6131 bp->flags |= BNX2_FLAG_USING_MSI;
6132 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6133 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6134 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6135 } else
6136 bp->irq_tbl[0].handler = bnx2_msi;
6138 bp->irq_tbl[0].vector = bp->pdev->irq;
6139 }
6140 }
6142 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6143 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6145 bp->num_rx_rings = bp->irq_nvecs;
6146 }
6148 /* Called with rtnl_lock */
6149 static int
6150 bnx2_open(struct net_device *dev)
6151 {
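/* Bring-up order: pick the interrupt mode, allocate rings, request
 * the vectors, then init the chip before enabling interrupts.
 */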
6152 struct bnx2 *bp = netdev_priv(dev);
6153 int rc;
6155 netif_carrier_off(dev);
6157 bnx2_set_power_state(bp, PCI_D0);
6158 bnx2_disable_int(bp);
6160 bnx2_setup_int_mode(bp, disable_msi);
6161 bnx2_napi_enable(bp);
6162 rc = bnx2_alloc_mem(bp);
6163 if (rc)
6164 goto open_err;
6166 rc = bnx2_request_irq(bp);
6167 if (rc)
6168 goto open_err;
6170 rc = bnx2_init_nic(bp, 1);
6171 if (rc)
6172 goto open_err;
6174 mod_timer(&bp->timer, jiffies + bp->current_interval);
6176 atomic_set(&bp->intr_sem, 0);
6178 bnx2_enable_int(bp);
6180 if (bp->flags & BNX2_FLAG_USING_MSI) {
6181 /* Test MSI to make sure it is working
6182 * If MSI test fails, go back to INTx mode
6183 */
6184 if (bnx2_test_intr(bp) != 0) {
6185 printk(KERN_WARNING PFX "%s: No interrupt was generated"
6186 " using MSI, switching to INTx mode. Please"
6187 " report this failure to the PCI maintainer"
6188 " and include system chipset information.\n",
6191 bnx2_disable_int(bp);
6194 bnx2_setup_int_mode(bp, 1);
6196 rc = bnx2_init_nic(bp, 0);
6198 if (!rc)
6199 rc = bnx2_request_irq(bp);
6201 if (rc) {
6202 del_timer_sync(&bp->timer);
6203 goto open_err;
6204 }
6205 bnx2_enable_int(bp);
6206 }
6207 }
6208 if (bp->flags & BNX2_FLAG_USING_MSI)
6209 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
6210 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6211 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
6213 netif_tx_start_all_queues(dev);
6215 return 0;
6217 open_err:
6218 bnx2_napi_disable(bp);
6219 bnx2_free_skbs(bp);
6220 bnx2_free_irq(bp);
6221 bnx2_free_mem(bp);
6222 return rc;
6223 }
6225 static void
6226 bnx2_reset_task(struct work_struct *work)
6227 {
6228 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6230 if (!netif_running(bp->dev))
6231 return;
6233 bnx2_netif_stop(bp);
6235 bnx2_init_nic(bp, 1);
6237 atomic_set(&bp->intr_sem, 1);
6238 bnx2_netif_start(bp);
6239 }
6241 static void
6242 bnx2_tx_timeout(struct net_device *dev)
6243 {
6244 struct bnx2 *bp = netdev_priv(dev);
6246 /* This allows the netif to be shutdown gracefully before resetting */
6247 schedule_work(&bp->reset_task);
6248 }
6250 #ifdef BCM_VLAN
6251 /* Called with rtnl_lock */
6252 static void
6253 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6254 {
6255 struct bnx2 *bp = netdev_priv(dev);
6257 bnx2_netif_stop(bp);
6259 bp->vlgrp = vlgrp;
6260 bnx2_set_rx_mode(dev);
6261 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6262 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6264 bnx2_netif_start(bp);
6265 }
6266 #endif
6268 /* Called with netif_tx_lock.
6269 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6270 * netif_wake_queue().
6271 */
6272 static int
6273 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6274 {
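/* Fast path: map the skb, fill one BD per fragment, then tell the
 * chip about the new producer index through the ring's mailbox.
 */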
6275 struct bnx2 *bp = netdev_priv(dev);
6276 dma_addr_t mapping;
6277 struct tx_bd *txbd;
6278 struct sw_tx_bd *tx_buf;
6279 u32 len, vlan_tag_flags, last_frag, mss;
6280 u16 prod, ring_prod;
6281 int i;
6282 struct bnx2_napi *bnapi;
6283 struct bnx2_tx_ring_info *txr;
6284 struct netdev_queue *txq;
6285 struct skb_shared_info *sp;
6287 /* Determine which tx ring we will be placed on */
6288 i = skb_get_queue_mapping(skb);
6289 bnapi = &bp->bnx2_napi[i];
6290 txr = &bnapi->tx_ring;
6291 txq = netdev_get_tx_queue(dev, i);
6293 if (unlikely(bnx2_tx_avail(bp, txr) <
6294 (skb_shinfo(skb)->nr_frags + 1))) {
6295 netif_tx_stop_queue(txq);
6296 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6297 dev->name);
6299 return NETDEV_TX_BUSY;
6300 }
6301 len = skb_headlen(skb);
6302 prod = txr->tx_prod;
6303 ring_prod = TX_RING_IDX(prod);
6305 vlan_tag_flags = 0;
6306 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6307 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6308 }
6310 #ifdef BCM_VLAN
6311 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6312 vlan_tag_flags |=
6313 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6314 }
6315 #endif
6316 if ((mss = skb_shinfo(skb)->gso_size)) {
6317 u32 tcp_opt_len;
6318 struct iphdr *iph;
6320 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6322 tcp_opt_len = tcp_optlen(skb);
6324 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6325 u32 tcp_off = skb_transport_offset(skb) -
6326 sizeof(struct ipv6hdr) - ETH_HLEN;
6328 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6329 TX_BD_FLAGS_SW_FLAGS;
6330 if (likely(tcp_off == 0))
6331 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6332 else {
6333 tcp_off >>= 3;
6334 vlan_tag_flags |= ((tcp_off & 0x3) <<
6335 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6336 ((tcp_off & 0x10) <<
6337 TX_BD_FLAGS_TCP6_OFF4_SHL);
6338 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6339 }
6340 } else {
6341 iph = ip_hdr(skb);
6342 if (tcp_opt_len || (iph->ihl > 5)) {
6343 vlan_tag_flags |= ((iph->ihl - 5) +
6344 (tcp_opt_len >> 2)) << 8;
6345 }
6346 }
6347 } else
6348 mss = 0;
6350 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
6351 dev_kfree_skb(skb);
6352 return NETDEV_TX_OK;
6353 }
6355 sp = skb_shinfo(skb);
6356 mapping = sp->dma_maps[0];
6358 tx_buf = &txr->tx_buf_ring[ring_prod];
6359 tx_buf->skb = skb;
6361 txbd = &txr->tx_desc_ring[ring_prod];
6363 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6364 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6365 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6366 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6368 last_frag = skb_shinfo(skb)->nr_frags;
6370 for (i = 0; i < last_frag; i++) {
6371 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6373 prod = NEXT_TX_BD(prod);
6374 ring_prod = TX_RING_IDX(prod);
6375 txbd = &txr->tx_desc_ring[ring_prod];
6377 len = frag->size;
6378 mapping = sp->dma_maps[i + 1];
6380 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6381 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6382 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6383 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6385 }
6386 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6388 prod = NEXT_TX_BD(prod);
6389 txr->tx_prod_bseq += skb->len;
6391 REG_WR16(bp, txr->tx_bidx_addr, prod);
6392 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6394 mmiowb();
6396 txr->tx_prod = prod;
6397 dev->trans_start = jiffies;
6399 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6400 netif_tx_stop_queue(txq);
6401 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6402 netif_tx_wake_queue(txq);
6403 }
6405 return NETDEV_TX_OK;
6406 }
6408 /* Called with rtnl_lock */
6409 static int
6410 bnx2_close(struct net_device *dev)
6411 {
6412 struct bnx2 *bp = netdev_priv(dev);
6414 cancel_work_sync(&bp->reset_task);
6416 bnx2_disable_int_sync(bp);
6417 bnx2_napi_disable(bp);
6418 del_timer_sync(&bp->timer);
6419 bnx2_shutdown_chip(bp);
6420 bnx2_free_irq(bp);
6421 bnx2_free_skbs(bp);
6422 bnx2_free_mem(bp);
6423 bp->link_up = 0;
6424 netif_carrier_off(bp->dev);
6425 bnx2_set_power_state(bp, PCI_D3hot);
6426 return 0;
6427 }
6429 #define GET_NET_STATS64(ctr) \
6430 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6431 (unsigned long) (ctr##_lo)
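/* On 32-bit hosts only the low 32 bits of each 64-bit hardware
 * counter fit in an unsigned long, so the _hi word is dropped.
 */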
6433 #define GET_NET_STATS32(ctr) \
6434 (ctr##_lo)
6436 #if (BITS_PER_LONG == 64)
6437 #define GET_NET_STATS GET_NET_STATS64
6438 #else
6439 #define GET_NET_STATS GET_NET_STATS32
6440 #endif
6442 static struct net_device_stats *
6443 bnx2_get_stats(struct net_device *dev)
6444 {
6445 struct bnx2 *bp = netdev_priv(dev);
6446 struct statistics_block *stats_blk = bp->stats_blk;
6447 struct net_device_stats *net_stats = &dev->stats;
6449 if (bp->stats_blk == NULL) {
6450 return net_stats;
6451 }
6452 net_stats->rx_packets =
6453 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6454 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6455 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6457 net_stats->tx_packets =
6458 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6459 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6460 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6462 net_stats->rx_bytes =
6463 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6465 net_stats->tx_bytes =
6466 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6468 net_stats->multicast =
6469 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6471 net_stats->collisions =
6472 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6474 net_stats->rx_length_errors =
6475 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6476 stats_blk->stat_EtherStatsOverrsizePkts);
6478 net_stats->rx_over_errors =
6479 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6481 net_stats->rx_frame_errors =
6482 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6484 net_stats->rx_crc_errors =
6485 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6487 net_stats->rx_errors = net_stats->rx_length_errors +
6488 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6489 net_stats->rx_crc_errors;
6491 net_stats->tx_aborted_errors =
6492 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6493 stats_blk->stat_Dot3StatsLateCollisions);
6495 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6496 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6497 net_stats->tx_carrier_errors = 0;
6498 else {
6499 net_stats->tx_carrier_errors =
6500 (unsigned long)
6501 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6502 }
6504 net_stats->tx_errors =
6505 (unsigned long)
6506 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6507 +
6508 net_stats->tx_aborted_errors +
6509 net_stats->tx_carrier_errors;
6511 net_stats->rx_missed_errors =
6512 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6513 stats_blk->stat_FwRxDrop);
6515 return net_stats;
6516 }
6518 /* All ethtool functions called with rtnl_lock */
6520 static int
6521 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6522 {
6523 struct bnx2 *bp = netdev_priv(dev);
6524 int support_serdes = 0, support_copper = 0;
6526 cmd->supported = SUPPORTED_Autoneg;
6527 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6528 support_serdes = 1;
6529 support_copper = 1;
6530 } else if (bp->phy_port == PORT_FIBRE)
6531 support_serdes = 1;
6532 else
6533 support_copper = 1;
6535 if (support_serdes) {
6536 cmd->supported |= SUPPORTED_1000baseT_Full |
6537 SUPPORTED_FIBRE;
6538 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6539 cmd->supported |= SUPPORTED_2500baseX_Full;
6541 }
6542 if (support_copper) {
6543 cmd->supported |= SUPPORTED_10baseT_Half |
6544 SUPPORTED_10baseT_Full |
6545 SUPPORTED_100baseT_Half |
6546 SUPPORTED_100baseT_Full |
6547 SUPPORTED_1000baseT_Full |
6548 SUPPORTED_TP;
6550 }
6552 spin_lock_bh(&bp->phy_lock);
6553 cmd->port = bp->phy_port;
6554 cmd->advertising = bp->advertising;
6556 if (bp->autoneg & AUTONEG_SPEED) {
6557 cmd->autoneg = AUTONEG_ENABLE;
6558 }
6559 else {
6560 cmd->autoneg = AUTONEG_DISABLE;
6561 }
6563 if (netif_carrier_ok(dev)) {
6564 cmd->speed = bp->line_speed;
6565 cmd->duplex = bp->duplex;
6566 }
6567 else {
6568 cmd->speed = -1;
6569 cmd->duplex = -1;
6570 }
6571 spin_unlock_bh(&bp->phy_lock);
6573 cmd->transceiver = XCVR_INTERNAL;
6574 cmd->phy_address = bp->phy_addr;
6576 return 0;
6577 }
6579 static int
6580 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6581 {
6582 struct bnx2 *bp = netdev_priv(dev);
6583 u8 autoneg = bp->autoneg;
6584 u8 req_duplex = bp->req_duplex;
6585 u16 req_line_speed = bp->req_line_speed;
6586 u32 advertising = bp->advertising;
6587 int err = -EINVAL;
6589 spin_lock_bh(&bp->phy_lock);
6591 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6592 goto err_out_unlock;
6594 if (cmd->port != bp->phy_port &&
6595 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6596 goto err_out_unlock;
6598 /* If device is down, we can store the settings only if the user
6599 * is setting the currently active port.
6600 */
6601 if (!netif_running(dev) && cmd->port != bp->phy_port)
6602 goto err_out_unlock;
6604 if (cmd->autoneg == AUTONEG_ENABLE) {
6605 autoneg |= AUTONEG_SPEED;
6607 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6609 /* allow advertising 1 speed */
6610 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6611 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6612 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6613 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6615 if (cmd->port == PORT_FIBRE)
6616 goto err_out_unlock;
6618 advertising = cmd->advertising;
6620 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6621 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6622 (cmd->port == PORT_TP))
6623 goto err_out_unlock;
6624 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6625 advertising = cmd->advertising;
6626 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6627 goto err_out_unlock;
6628 else {
6629 if (cmd->port == PORT_FIBRE)
6630 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6631 else
6632 advertising = ETHTOOL_ALL_COPPER_SPEED;
6633 }
6634 advertising |= ADVERTISED_Autoneg;
6636 } else {
6637 if (cmd->port == PORT_FIBRE) {
6638 if ((cmd->speed != SPEED_1000 &&
6639 cmd->speed != SPEED_2500) ||
6640 (cmd->duplex != DUPLEX_FULL))
6641 goto err_out_unlock;
6643 if (cmd->speed == SPEED_2500 &&
6644 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6645 goto err_out_unlock;
6646 }
6647 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6648 goto err_out_unlock;
6650 autoneg &= ~AUTONEG_SPEED;
6651 req_line_speed = cmd->speed;
6652 req_duplex = cmd->duplex;
6653 advertising = 0;
6654 }
6656 bp->autoneg = autoneg;
6657 bp->advertising = advertising;
6658 bp->req_line_speed = req_line_speed;
6659 bp->req_duplex = req_duplex;
6661 err = 0;
6662 /* If device is down, the new settings will be picked up when it is
6663 * brought up.
6664 */
6665 if (netif_running(dev))
6666 err = bnx2_setup_phy(bp, cmd->port);
6668 err_out_unlock:
6669 spin_unlock_bh(&bp->phy_lock);
6671 return err;
6672 }
6674 static void
6675 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6676 {
6677 struct bnx2 *bp = netdev_priv(dev);
6679 strcpy(info->driver, DRV_MODULE_NAME);
6680 strcpy(info->version, DRV_MODULE_VERSION);
6681 strcpy(info->bus_info, pci_name(bp->pdev));
6682 strcpy(info->fw_version, bp->fw_version);
6683 }
6685 #define BNX2_REGDUMP_LEN (32 * 1024)
6687 static int
6688 bnx2_get_regs_len(struct net_device *dev)
6689 {
6690 return BNX2_REGDUMP_LEN;
6691 }
6693 static void
6694 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6695 {
6696 u32 *p = _p, i, offset;
6697 u8 *orig_p = _p;
6698 struct bnx2 *bp = netdev_priv(dev);
6699 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6700 0x0800, 0x0880, 0x0c00, 0x0c10,
6701 0x0c30, 0x0d08, 0x1000, 0x101c,
6702 0x1040, 0x1048, 0x1080, 0x10a4,
6703 0x1400, 0x1490, 0x1498, 0x14f0,
6704 0x1500, 0x155c, 0x1580, 0x15dc,
6705 0x1600, 0x1658, 0x1680, 0x16d8,
6706 0x1800, 0x1820, 0x1840, 0x1854,
6707 0x1880, 0x1894, 0x1900, 0x1984,
6708 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6709 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6710 0x2000, 0x2030, 0x23c0, 0x2400,
6711 0x2800, 0x2820, 0x2830, 0x2850,
6712 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6713 0x3c00, 0x3c94, 0x4000, 0x4010,
6714 0x4080, 0x4090, 0x43c0, 0x4458,
6715 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6716 0x4fc0, 0x5010, 0x53c0, 0x5444,
6717 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6718 0x5fc0, 0x6000, 0x6400, 0x6428,
6719 0x6800, 0x6848, 0x684c, 0x6860,
6720 0x6888, 0x6910, 0x8000 };
6722 regs->version = 0;
6724 memset(p, 0, BNX2_REGDUMP_LEN);
6726 if (!netif_running(bp->dev))
6727 return;
6729 i = 0;
6730 offset = reg_boundaries[0];
6731 p += offset;
6732 while (offset < BNX2_REGDUMP_LEN) {
6733 *p++ = REG_RD(bp, offset);
6734 offset += 4;
6735 if (offset == reg_boundaries[i + 1]) {
6736 offset = reg_boundaries[i + 2];
6737 p = (u32 *) (orig_p + offset);
6738 i += 2;
6739 }
6740 }
6741 }
6743 static void
6744 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6745 {
6746 struct bnx2 *bp = netdev_priv(dev);
6748 if (bp->flags & BNX2_FLAG_NO_WOL) {
6749 wol->supported = 0;
6750 wol->wolopts = 0;
6751 }
6752 else {
6753 wol->supported = WAKE_MAGIC;
6754 if (bp->wol)
6755 wol->wolopts = WAKE_MAGIC;
6756 else
6757 wol->wolopts = 0;
6758 }
6759 memset(&wol->sopass, 0, sizeof(wol->sopass));
6760 }
6762 static int
6763 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6764 {
6765 struct bnx2 *bp = netdev_priv(dev);
6767 if (wol->wolopts & ~WAKE_MAGIC)
6768 return -EINVAL;
6770 if (wol->wolopts & WAKE_MAGIC) {
6771 if (bp->flags & BNX2_FLAG_NO_WOL)
6772 return -EINVAL;
6774 bp->wol = 1;
6775 }
6776 else {
6777 bp->wol = 0;
6778 }
6779 return 0;
6780 }
6782 static int
6783 bnx2_nway_reset(struct net_device *dev)
6784 {
6785 struct bnx2 *bp = netdev_priv(dev);
6786 u32 bmcr;
6788 if (!netif_running(dev))
6789 return -EAGAIN;
6791 if (!(bp->autoneg & AUTONEG_SPEED)) {
6792 return -EINVAL;
6793 }
6795 spin_lock_bh(&bp->phy_lock);
6797 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6798 int rc;
6800 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6801 spin_unlock_bh(&bp->phy_lock);
6802 return rc;
6803 }
6805 /* Force a link down visible on the other side */
6806 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6807 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6808 spin_unlock_bh(&bp->phy_lock);
6810 msleep(20);
6812 spin_lock_bh(&bp->phy_lock);
6814 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6815 bp->serdes_an_pending = 1;
6816 mod_timer(&bp->timer, jiffies + bp->current_interval);
6817 }
6819 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6820 bmcr &= ~BMCR_LOOPBACK;
6821 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6823 spin_unlock_bh(&bp->phy_lock);
6825 return 0;
6826 }
6828 static int
6829 bnx2_get_eeprom_len(struct net_device *dev)
6830 {
6831 struct bnx2 *bp = netdev_priv(dev);
6833 if (bp->flash_info == NULL)
6834 return 0;
6836 return (int) bp->flash_size;
6837 }
6839 static int
6840 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6841 u8 *eebuf)
6842 {
6843 struct bnx2 *bp = netdev_priv(dev);
6844 int rc;
6846 if (!netif_running(dev))
6847 return -EAGAIN;
6849 /* parameters already validated in ethtool_get_eeprom */
6851 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6853 return rc;
6854 }
6856 static int
6857 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6858 u8 *eebuf)
6859 {
6860 struct bnx2 *bp = netdev_priv(dev);
6861 int rc;
6863 if (!netif_running(dev))
6864 return -EAGAIN;
6866 /* parameters already validated in ethtool_set_eeprom */
6868 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6870 return rc;
6871 }
6873 static int
6874 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6875 {
6876 struct bnx2 *bp = netdev_priv(dev);
6878 memset(coal, 0, sizeof(struct ethtool_coalesce));
6880 coal->rx_coalesce_usecs = bp->rx_ticks;
6881 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6882 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6883 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6885 coal->tx_coalesce_usecs = bp->tx_ticks;
6886 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6887 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6888 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6890 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6892 return 0;
6893 }
6895 static int
6896 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6897 {
6898 struct bnx2 *bp = netdev_priv(dev);
6900 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6901 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6903 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6904 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6906 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6907 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6909 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6910 if (bp->rx_quick_cons_trip_int > 0xff)
6911 bp->rx_quick_cons_trip_int = 0xff;
6913 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6914 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6916 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6917 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6919 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6920 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6922 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6923 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6924 0xff;
6926 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6927 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6928 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6929 bp->stats_ticks = USEC_PER_SEC;
6930 }
6931 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6932 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6933 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6935 if (netif_running(bp->dev)) {
6936 bnx2_netif_stop(bp);
6937 bnx2_init_nic(bp, 0);
6938 bnx2_netif_start(bp);
6939 }
6941 return 0;
6942 }
6944 static void
6945 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6946 {
6947 struct bnx2 *bp = netdev_priv(dev);
6949 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6950 ering->rx_mini_max_pending = 0;
6951 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6953 ering->rx_pending = bp->rx_ring_size;
6954 ering->rx_mini_pending = 0;
6955 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6957 ering->tx_max_pending = MAX_TX_DESC_CNT;
6958 ering->tx_pending = bp->tx_ring_size;
6959 }
6961 static int
6962 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6963 {
6964 if (netif_running(bp->dev)) {
6965 bnx2_netif_stop(bp);
6966 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6967 bnx2_free_skbs(bp);
6968 bnx2_free_mem(bp);
6969 }
6971 bnx2_set_rx_ring_size(bp, rx);
6972 bp->tx_ring_size = tx;
6974 if (netif_running(bp->dev)) {
6975 int rc;
6977 rc = bnx2_alloc_mem(bp);
6978 if (rc)
6979 return rc;
6980 bnx2_init_nic(bp, 0);
6981 bnx2_netif_start(bp);
6982 }
6983 return 0;
6984 }
6986 static int
6987 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6988 {
6989 struct bnx2 *bp = netdev_priv(dev);
6990 int rc;
6992 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6993 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6994 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6996 return -EINVAL;
6997 }
6998 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6999 return rc;
7000 }
7002 static void
7003 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7004 {
7005 struct bnx2 *bp = netdev_priv(dev);
7007 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7008 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7009 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7010 }
7012 static int
7013 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7014 {
7015 struct bnx2 *bp = netdev_priv(dev);
7017 bp->req_flow_ctrl = 0;
7018 if (epause->rx_pause)
7019 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7020 if (epause->tx_pause)
7021 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7023 if (epause->autoneg) {
7024 bp->autoneg |= AUTONEG_FLOW_CTRL;
7025 }
7026 else {
7027 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7028 }
7030 if (netif_running(dev)) {
7031 spin_lock_bh(&bp->phy_lock);
7032 bnx2_setup_phy(bp, bp->phy_port);
7033 spin_unlock_bh(&bp->phy_lock);
7034 }
7036 return 0;
7037 }
7039 static u32
7040 bnx2_get_rx_csum(struct net_device *dev)
7041 {
7042 struct bnx2 *bp = netdev_priv(dev);
7044 return bp->rx_csum;
7045 }
7047 static int
7048 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7049 {
7050 struct bnx2 *bp = netdev_priv(dev);
7052 bp->rx_csum = data;
7053 return 0;
7054 }
7056 static int
7057 bnx2_set_tso(struct net_device *dev, u32 data)
7058 {
7059 struct bnx2 *bp = netdev_priv(dev);
7061 if (data) {
7062 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7063 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7064 dev->features |= NETIF_F_TSO6;
7065 } else
7066 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7067 NETIF_F_TSO_ECN);
7068 return 0;
7069 }
7071 #define BNX2_NUM_STATS 46
7073 static struct {
7074 char string[ETH_GSTRING_LEN];
7075 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
7076 { "rx_bytes" },
7077 { "rx_error_bytes" },
7078 { "tx_bytes" },
7079 { "tx_error_bytes" },
7080 { "rx_ucast_packets" },
7081 { "rx_mcast_packets" },
7082 { "rx_bcast_packets" },
7083 { "tx_ucast_packets" },
7084 { "tx_mcast_packets" },
7085 { "tx_bcast_packets" },
7086 { "tx_mac_errors" },
7087 { "tx_carrier_errors" },
7088 { "rx_crc_errors" },
7089 { "rx_align_errors" },
7090 { "tx_single_collisions" },
7091 { "tx_multi_collisions" },
7092 { "tx_deferred_frames" },
7093 { "tx_excess_collisions" },
7094 { "tx_late_collisions" },
7095 { "tx_total_collisions" },
7096 { "rx_fragments" },
7097 { "rx_jabbers" },
7098 { "rx_undersize_packets" },
7099 { "rx_oversize_packets" },
7100 { "rx_64_byte_packets" },
7101 { "rx_65_to_127_byte_packets" },
7102 { "rx_128_to_255_byte_packets" },
7103 { "rx_256_to_511_byte_packets" },
7104 { "rx_512_to_1023_byte_packets" },
7105 { "rx_1024_to_1522_byte_packets" },
7106 { "rx_1523_to_9022_byte_packets" },
7107 { "tx_64_byte_packets" },
7108 { "tx_65_to_127_byte_packets" },
7109 { "tx_128_to_255_byte_packets" },
7110 { "tx_256_to_511_byte_packets" },
7111 { "tx_512_to_1023_byte_packets" },
7112 { "tx_1024_to_1522_byte_packets" },
7113 { "tx_1523_to_9022_byte_packets" },
7114 { "rx_xon_frames" },
7115 { "rx_xoff_frames" },
7116 { "tx_xon_frames" },
7117 { "tx_xoff_frames" },
7118 { "rx_mac_ctrl_frames" },
7119 { "rx_filtered_packets" },
7120 { "rx_discards" },
7121 { "rx_fw_discards" },
7122 };
7124 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
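/* Word offsets into the statistics block for each string above;
 * 64-bit counters point at their _hi word and span two words.
 */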
7126 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7127 STATS_OFFSET32(stat_IfHCInOctets_hi),
7128 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7129 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7130 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7131 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7132 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7133 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7134 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7135 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7136 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7137 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7138 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7139 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7140 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7141 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7142 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7143 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7144 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7145 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7146 STATS_OFFSET32(stat_EtherStatsCollisions),
7147 STATS_OFFSET32(stat_EtherStatsFragments),
7148 STATS_OFFSET32(stat_EtherStatsJabbers),
7149 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7150 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7151 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7152 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7153 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7154 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7155 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7156 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7157 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7158 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7159 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7160 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7161 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7162 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7163 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7164 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7165 STATS_OFFSET32(stat_XonPauseFramesReceived),
7166 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7167 STATS_OFFSET32(stat_OutXonSent),
7168 STATS_OFFSET32(stat_OutXoffSent),
7169 STATS_OFFSET32(stat_MacControlFramesReceived),
7170 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7171 STATS_OFFSET32(stat_IfInMBUFDiscards),
7172 STATS_OFFSET32(stat_FwRxDrop),
7173 };
7175 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7176 * skipped because of errata.
7177 */
7178 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7179 8,0,8,8,8,8,8,8,8,8,
7180 4,0,4,4,4,4,4,4,4,4,
7181 4,4,4,4,4,4,4,4,4,4,
7182 4,4,4,4,4,4,4,4,4,4,
7183 4,4,4,4,4,4,
7184 };
7186 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7187 8,0,8,8,8,8,8,8,8,8,
7188 4,4,4,4,4,4,4,4,4,4,
7189 4,4,4,4,4,4,4,4,4,4,
7190 4,4,4,4,4,4,4,4,4,4,
7191 4,4,4,4,4,4,
7192 };
7194 #define BNX2_NUM_TESTS 6
7196 static struct {
7197 char string[ETH_GSTRING_LEN];
7198 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7199 { "register_test (offline)" },
7200 { "memory_test (offline)" },
7201 { "loopback_test (offline)" },
7202 { "nvram_test (online)" },
7203 { "interrupt_test (online)" },
7204 { "link_test (online)" },
7205 };
7207 static int
7208 bnx2_get_sset_count(struct net_device *dev, int sset)
7209 {
7210 switch (sset) {
7211 case ETH_SS_TEST:
7212 return BNX2_NUM_TESTS;
7213 case ETH_SS_STATS:
7214 return BNX2_NUM_STATS;
7215 default:
7216 return -EOPNOTSUPP;
7217 }
7218 }
7220 static void
7221 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7222 {
7223 struct bnx2 *bp = netdev_priv(dev);
7225 bnx2_set_power_state(bp, PCI_D0);
7227 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7228 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7229 int i;
7231 bnx2_netif_stop(bp);
7232 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7233 bnx2_free_skbs(bp);
7235 if (bnx2_test_registers(bp) != 0) {
7236 buf[0] = 1;
7237 etest->flags |= ETH_TEST_FL_FAILED;
7238 }
7239 if (bnx2_test_memory(bp) != 0) {
7240 buf[1] = 1;
7241 etest->flags |= ETH_TEST_FL_FAILED;
7242 }
7243 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7244 etest->flags |= ETH_TEST_FL_FAILED;
7246 if (!netif_running(bp->dev))
7247 bnx2_shutdown_chip(bp);
7248 else {
7249 bnx2_init_nic(bp, 1);
7250 bnx2_netif_start(bp);
7251 }
7253 /* wait for link up */
7254 for (i = 0; i < 7; i++) {
7255 if (bp->link_up)
7256 break;
7257 msleep_interruptible(1000);
7258 }
7259 }
7261 if (bnx2_test_nvram(bp) != 0) {
7262 buf[3] = 1;
7263 etest->flags |= ETH_TEST_FL_FAILED;
7264 }
7265 if (bnx2_test_intr(bp) != 0) {
7266 buf[4] = 1;
7267 etest->flags |= ETH_TEST_FL_FAILED;
7268 }
7270 if (bnx2_test_link(bp) != 0) {
7271 buf[5] = 1;
7272 etest->flags |= ETH_TEST_FL_FAILED;
7273 }
7275 if (!netif_running(bp->dev))
7276 bnx2_set_power_state(bp, PCI_D3hot);
7277 }
7279 static void
7280 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7281 {
7282 switch (stringset) {
7283 case ETH_SS_STATS:
7284 memcpy(buf, bnx2_stats_str_arr,
7285 sizeof(bnx2_stats_str_arr));
7286 break;
7287 case ETH_SS_TEST:
7288 memcpy(buf, bnx2_tests_str_arr,
7289 sizeof(bnx2_tests_str_arr));
7290 break;
7291 }
7292 }
7294 static void
7295 bnx2_get_ethtool_stats(struct net_device *dev,
7296 struct ethtool_stats *stats, u64 *buf)
7297 {
7298 struct bnx2 *bp = netdev_priv(dev);
7299 int i;
7300 u32 *hw_stats = (u32 *) bp->stats_blk;
7301 u8 *stats_len_arr = NULL;
7303 if (hw_stats == NULL) {
7304 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7305 return;
7306 }
7308 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7309 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7310 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7311 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7312 stats_len_arr = bnx2_5706_stats_len_arr;
7313 else
7314 stats_len_arr = bnx2_5708_stats_len_arr;
7316 for (i = 0; i < BNX2_NUM_STATS; i++) {
7317 if (stats_len_arr[i] == 0) {
7318 /* skip this counter */
7319 buf[i] = 0;
7320 continue;
7321 }
7322 if (stats_len_arr[i] == 4) {
7323 /* 4-byte counter */
7324 buf[i] = (u64)
7325 *(hw_stats + bnx2_stats_offset_arr[i]);
7326 continue;
7327 }
7328 /* 8-byte counter */
7329 buf[i] = (((u64) *(hw_stats +
7330 bnx2_stats_offset_arr[i])) << 32) +
7331 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7332 }
7333 }
7335 static int
7336 bnx2_phys_id(struct net_device *dev, u32 data)
7337 {
7338 struct bnx2 *bp = netdev_priv(dev);
7339 int i;
7340 u32 save;
7342 bnx2_set_power_state(bp, PCI_D0);
7344 if (data == 0)
7345 data = 2;
7347 save = REG_RD(bp, BNX2_MISC_CFG);
7348 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7350 for (i = 0; i < (data * 2); i++) {
7351 if ((i % 2) == 0) {
7352 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7353 }
7354 else {
7355 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7356 BNX2_EMAC_LED_1000MB_OVERRIDE |
7357 BNX2_EMAC_LED_100MB_OVERRIDE |
7358 BNX2_EMAC_LED_10MB_OVERRIDE |
7359 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7360 BNX2_EMAC_LED_TRAFFIC);
7361 }
7362 msleep_interruptible(500);
7363 if (signal_pending(current))
7364 break;
7365 }
7366 REG_WR(bp, BNX2_EMAC_LED, 0);
7367 REG_WR(bp, BNX2_MISC_CFG, save);
7369 if (!netif_running(dev))
7370 bnx2_set_power_state(bp, PCI_D3hot);
7372 return 0;
7373 }
7375 static int
7376 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7377 {
7378 struct bnx2 *bp = netdev_priv(dev);
7380 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7381 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7382 else
7383 return (ethtool_op_set_tx_csum(dev, data));
7384 }
7386 static const struct ethtool_ops bnx2_ethtool_ops = {
7387 .get_settings = bnx2_get_settings,
7388 .set_settings = bnx2_set_settings,
7389 .get_drvinfo = bnx2_get_drvinfo,
7390 .get_regs_len = bnx2_get_regs_len,
7391 .get_regs = bnx2_get_regs,
7392 .get_wol = bnx2_get_wol,
7393 .set_wol = bnx2_set_wol,
7394 .nway_reset = bnx2_nway_reset,
7395 .get_link = ethtool_op_get_link,
7396 .get_eeprom_len = bnx2_get_eeprom_len,
7397 .get_eeprom = bnx2_get_eeprom,
7398 .set_eeprom = bnx2_set_eeprom,
7399 .get_coalesce = bnx2_get_coalesce,
7400 .set_coalesce = bnx2_set_coalesce,
7401 .get_ringparam = bnx2_get_ringparam,
7402 .set_ringparam = bnx2_set_ringparam,
7403 .get_pauseparam = bnx2_get_pauseparam,
7404 .set_pauseparam = bnx2_set_pauseparam,
7405 .get_rx_csum = bnx2_get_rx_csum,
7406 .set_rx_csum = bnx2_set_rx_csum,
7407 .set_tx_csum = bnx2_set_tx_csum,
7408 .set_sg = ethtool_op_set_sg,
7409 .set_tso = bnx2_set_tso,
7410 .self_test = bnx2_self_test,
7411 .get_strings = bnx2_get_strings,
7412 .phys_id = bnx2_phys_id,
7413 .get_ethtool_stats = bnx2_get_ethtool_stats,
7414 .get_sset_count = bnx2_get_sset_count,
7415 };
7417 /* Called with rtnl_lock */
7418 static int
7419 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7420 {
7421 struct mii_ioctl_data *data = if_mii(ifr);
7422 struct bnx2 *bp = netdev_priv(dev);
7427 data->phy_id = bp->phy_addr;
7433 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7436 if (!netif_running(dev))
7439 spin_lock_bh(&bp->phy_lock);
7440 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7441 spin_unlock_bh(&bp->phy_lock);
7443 data->val_out = mii_regval;
7449 if (!capable(CAP_NET_ADMIN))
7452 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7455 if (!netif_running(dev))
7458 spin_lock_bh(&bp->phy_lock);
7459 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7460 spin_unlock_bh(&bp->phy_lock);
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		disable_irq(bp->irq_tbl[i].vector);
		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
		enable_irq(bp->irq_tbl[i].vector);
	}
}
#endif
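
/* The 5709 is a dual-media part: the bond ID in the dual-media control
 * register identifies copper-only (C) and SerDes-only (S) packages
 * directly; otherwise the media must be inferred from the PHY strap
 * bits, which map differently on PCI function 0 and the other functions.
 */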
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4: case 0x5: case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1: case 0x2: case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}
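
/* One-time board setup: enable and map the PCI device, detect the chip
 * and its bus/DMA capabilities, locate shared memory and verify the
 * bootcode is running, read the permanent MAC address, and establish
 * default ring sizes, coalescing parameters and PHY flags.
 */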
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	} else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		/* set an error code so the probe does not falsely succeed */
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	/* Build the bootcode version string from BNX2_DEV_INFO_BC_REV:
	 * one decimal field per byte, separated by dots.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
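
	/* Worked example (illustrative, not from the original source): a
	 * BC_REV word of 0x04061000 decodes byte-by-byte to 0x04, 0x06,
	 * 0x10, so the loop above emits "4.6.16", suppressing leading
	 * zeros within each field.
	 */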

	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
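
	/* Byte layout (from the shifts above): MAC_UPPER carries bytes 0-1
	 * of the station address in its low 16 bits, MAC_LOWER carries
	 * bytes 2-5, most significant byte first.
	 */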

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
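
/* Example output (illustrative): a 5708 in a 64-bit PCI-X slot detected
 * at 133 MHz formats as "PCI-X 64-bit 133MHz"; a PCIE-attached 5709
 * simply reports "PCI Express".
 */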
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}
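
/* Vector 0's bnx2_poll also services link and status-block events, while
 * the remaining MSI-X vectors use the leaner bnx2_poll_msix; 64 is the
 * conventional NAPI weight.
 */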
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
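
/* Teardown mirrors bnx2_init_one() in reverse: unregister the netdev,
 * drop the firmware references, unmap registers, then release and
 * disable the PCI device.
 */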
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);