/* Source: linux-2.6 tree, drivers/net/bnx2.c */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #include "bnx2.h"
54 #include "bnx2_fw.h"
55
56 #define DRV_MODULE_NAME         "bnx2"
57 #define PFX DRV_MODULE_NAME     ": "
58 #define DRV_MODULE_VERSION      "2.0.1"
59 #define DRV_MODULE_RELDATE      "May 6, 2009"
60 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-4.6.16.fw"
61 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-4.6.16.fw"
62 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-4.6.17.fw"
63 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-4.6.15.fw"
64
65 #define RUN_AT(x) (jiffies + (x))
66
67 /* Time in jiffies before concluding the transmitter is hung. */
68 #define TX_TIMEOUT  (5*HZ)
69
70 static char version[] __devinitdata =
71         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
72
73 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
74 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
75 MODULE_LICENSE("GPL");
76 MODULE_VERSION(DRV_MODULE_VERSION);
77 MODULE_FIRMWARE(FW_MIPS_FILE_06);
78 MODULE_FIRMWARE(FW_RV2P_FILE_06);
79 MODULE_FIRMWARE(FW_MIPS_FILE_09);
80 MODULE_FIRMWARE(FW_RV2P_FILE_09);
81
/* Module parameter: set disable_msi=1 to force legacy INTx interrupts
 * on systems where MSI delivery is broken.
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
86
/* Board types supported by this driver.  Used as driver_data in
 * bnx2_pci_tbl and as the index into board_info[] below, so the two
 * must stay in the same order.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;
100
/* indexed by board_t, above -- entry order must match the board_t
 * enumerators exactly, since the PCI table's driver_data is used as a
 * direct index into this array.
 */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
        };
117
/* PCI device IDs handled by this driver.  driver_data (last field) is
 * a board_t index into board_info[].  The entries with specific HP
 * subsystem IDs must come before the corresponding PCI_ANY_ID
 * catch-all entries: PCI device table matching takes the first hit.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 0x163b/0x163c are the BCM5716/5716S device IDs; presumably
         * no PCI_DEVICE_ID_NX2_* macro existed for them yet -- verify
         * against pci_ids.h before converting.
         */
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
143
/* NVRAM device table.  Each entry carries the strapping value,
 * controller configuration words, and part geometry (page bits/size,
 * address mask, total size) for one supported flash/EEPROM part.
 * "Expansion entry" rows are placeholders for strap combinations with
 * no known part.  Which entry applies is selected at probe time from
 * the chip's strapping.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
232
/* The 5709 family does not use the strap-selected table above; its
 * flash is always accessed through the buffered interface with this
 * fixed geometry.
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
241
242 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
243
244 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
245 {
246         u32 diff;
247
248         smp_mb();
249
250         /* The ring uses 256 indices for 255 entries, one of them
251          * needs to be skipped.
252          */
253         diff = txr->tx_prod - txr->tx_cons;
254         if (unlikely(diff >= TX_DESC_CNT)) {
255                 diff &= 0xffff;
256                 if (diff == TX_DESC_CNT)
257                         diff = MAX_TX_DESC_CNT;
258         }
259         return (bp->tx_ring_size - diff);
260 }
261
262 static u32
263 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
264 {
265         u32 val;
266
267         spin_lock_bh(&bp->indirect_lock);
268         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
269         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
270         spin_unlock_bh(&bp->indirect_lock);
271         return val;
272 }
273
274 static void
275 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
276 {
277         spin_lock_bh(&bp->indirect_lock);
278         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
279         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
280         spin_unlock_bh(&bp->indirect_lock);
281 }
282
/* Write @val into the firmware shared memory region at @offset
 * (relative to shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
288
289 static u32
290 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
291 {
292         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
293 }
294
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * 5709-family chips take the write through the CTX_CTX_DATA/CTRL
 * register pair and we poll (up to 5 * 5us) for the hardware to clear
 * WRITE_REQ; older chips use the simpler CTX_DATA_ADR/CTX_DATA
 * window.  indirect_lock serializes use of the shared registers.
 * Note the timeout case is silent: after 5 tries we give up without
 * reporting an error.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* Wait for the chip to latch the write. */
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
318
/* Read PHY register @reg over the EMAC MDIO interface.
 *
 * Returns 0 with the 16-bit register value stored in *val, or -EBUSY
 * (with *val zeroed) if the transaction does not complete within the
 * poll window (50 * 10us).
 *
 * If hardware MDIO auto-polling is enabled it must be switched off
 * for the duration of the manual access and restored afterwards,
 * since both share the MDIO bus.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read back to flush the posted write, then give any
                 * in-flight auto-poll cycle time to finish.
                 */
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose the MDIO read command: PHY address, register number,
         * and START_BUSY to kick off the transaction.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Hardware clears START_BUSY when the transaction completes. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the returned data and keep
                         * only the 16 data bits.
                         */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                /* Timed out -- transaction never completed. */
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore the auto-polling we disabled above. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
375
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Returns 0 on success or -EBUSY if the transaction does not complete
 * within the poll window (50 * 10us).  As in bnx2_read_phy(), MDIO
 * auto-polling is suspended around the manual access and restored
 * afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Flush the posted write and let any in-flight
                 * auto-poll cycle finish.
                 */
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose the MDIO write command: PHY address, register
         * number, the 16-bit data, and START_BUSY to start it.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Hardware clears START_BUSY when the transaction completes. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore the auto-polling we disabled above. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
424
425 static void
426 bnx2_disable_int(struct bnx2 *bp)
427 {
428         int i;
429         struct bnx2_napi *bnapi;
430
431         for (i = 0; i < bp->irq_nvecs; i++) {
432                 bnapi = &bp->bnx2_napi[i];
433                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
434                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
435         }
436         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
437 }
438
/* Unmask interrupts on every vector.
 *
 * For each vector two INT_ACK_CMD writes are issued: the first acks
 * events up to last_status_idx with the interrupt still masked, the
 * second repeats the ack with the mask bit cleared, enabling the
 * interrupt.  NOTE(review): the two-step sequence looks intentional
 * (avoiding a spurious interrupt between ack and unmask) -- confirm
 * against the chip programming guide before simplifying.
 *
 * The final COAL_NOW command asks the host coalescing block to
 * generate an interrupt immediately if work is already pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
459
/* Mask interrupts and wait for any handlers already running on other
 * CPUs to finish.  intr_sem is incremented BEFORE masking: its
 * non-zero value keeps bnx2_netif_start() from re-enabling interrupts
 * until the matching atomic_dec_and_test() there.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
470
471 static void
472 bnx2_napi_disable(struct bnx2 *bp)
473 {
474         int i;
475
476         for (i = 0; i < bp->irq_nvecs; i++)
477                 napi_disable(&bp->bnx2_napi[i].napi);
478 }
479
480 static void
481 bnx2_napi_enable(struct bnx2 *bp)
482 {
483         int i;
484
485         for (i = 0; i < bp->irq_nvecs; i++)
486                 napi_enable(&bp->bnx2_napi[i].napi);
487 }
488
/* Quiesce the interface: mask and synchronize interrupts, then stop
 * NAPI polling and the tx queues.  trans_start is refreshed so the
 * netdev watchdog does not fire a tx timeout while traffic is
 * deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
499
500 static void
501 bnx2_netif_start(struct bnx2 *bp)
502 {
503         if (atomic_dec_and_test(&bp->intr_sem)) {
504                 if (netif_running(bp->dev)) {
505                         netif_tx_wake_all_queues(bp->dev);
506                         bnx2_napi_enable(bp);
507                         bnx2_enable_int(bp);
508                 }
509         }
510 }
511
512 static void
513 bnx2_free_tx_mem(struct bnx2 *bp)
514 {
515         int i;
516
517         for (i = 0; i < bp->num_tx_rings; i++) {
518                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
519                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
520
521                 if (txr->tx_desc_ring) {
522                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
523                                             txr->tx_desc_ring,
524                                             txr->tx_desc_mapping);
525                         txr->tx_desc_ring = NULL;
526                 }
527                 kfree(txr->tx_buf_ring);
528                 txr->tx_buf_ring = NULL;
529         }
530 }
531
532 static void
533 bnx2_free_rx_mem(struct bnx2 *bp)
534 {
535         int i;
536
537         for (i = 0; i < bp->num_rx_rings; i++) {
538                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
539                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
540                 int j;
541
542                 for (j = 0; j < bp->rx_max_ring; j++) {
543                         if (rxr->rx_desc_ring[j])
544                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
545                                                     rxr->rx_desc_ring[j],
546                                                     rxr->rx_desc_mapping[j]);
547                         rxr->rx_desc_ring[j] = NULL;
548                 }
549                 if (rxr->rx_buf_ring)
550                         vfree(rxr->rx_buf_ring);
551                 rxr->rx_buf_ring = NULL;
552
553                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
554                         if (rxr->rx_pg_desc_ring[j])
555                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
556                                                     rxr->rx_pg_desc_ring[j],
557                                                     rxr->rx_pg_desc_mapping[j]);
558                         rxr->rx_pg_desc_ring[j] = NULL;
559                 }
560                 if (rxr->rx_pg_ring)
561                         vfree(rxr->rx_pg_ring);
562                 rxr->rx_pg_ring = NULL;
563         }
564 }
565
566 static int
567 bnx2_alloc_tx_mem(struct bnx2 *bp)
568 {
569         int i;
570
571         for (i = 0; i < bp->num_tx_rings; i++) {
572                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
573                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
574
575                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
576                 if (txr->tx_buf_ring == NULL)
577                         return -ENOMEM;
578
579                 txr->tx_desc_ring =
580                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
581                                              &txr->tx_desc_mapping);
582                 if (txr->tx_desc_ring == NULL)
583                         return -ENOMEM;
584         }
585         return 0;
586 }
587
588 static int
589 bnx2_alloc_rx_mem(struct bnx2 *bp)
590 {
591         int i;
592
593         for (i = 0; i < bp->num_rx_rings; i++) {
594                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
595                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
596                 int j;
597
598                 rxr->rx_buf_ring =
599                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
600                 if (rxr->rx_buf_ring == NULL)
601                         return -ENOMEM;
602
603                 memset(rxr->rx_buf_ring, 0,
604                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
605
606                 for (j = 0; j < bp->rx_max_ring; j++) {
607                         rxr->rx_desc_ring[j] =
608                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
609                                                      &rxr->rx_desc_mapping[j]);
610                         if (rxr->rx_desc_ring[j] == NULL)
611                                 return -ENOMEM;
612
613                 }
614
615                 if (bp->rx_pg_ring_size) {
616                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
617                                                   bp->rx_max_pg_ring);
618                         if (rxr->rx_pg_ring == NULL)
619                                 return -ENOMEM;
620
621                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
622                                bp->rx_max_pg_ring);
623                 }
624
625                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
626                         rxr->rx_pg_desc_ring[j] =
627                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
628                                                 &rxr->rx_pg_desc_mapping[j]);
629                         if (rxr->rx_pg_desc_ring[j] == NULL)
630                                 return -ENOMEM;
631
632                 }
633         }
634         return 0;
635 }
636
637 static void
638 bnx2_free_mem(struct bnx2 *bp)
639 {
640         int i;
641         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
642
643         bnx2_free_tx_mem(bp);
644         bnx2_free_rx_mem(bp);
645
646         for (i = 0; i < bp->ctx_pages; i++) {
647                 if (bp->ctx_blk[i]) {
648                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
649                                             bp->ctx_blk[i],
650                                             bp->ctx_blk_mapping[i]);
651                         bp->ctx_blk[i] = NULL;
652                 }
653         }
654         if (bnapi->status_blk.msi) {
655                 pci_free_consistent(bp->pdev, bp->status_stats_size,
656                                     bnapi->status_blk.msi,
657                                     bp->status_blk_mapping);
658                 bnapi->status_blk.msi = NULL;
659                 bp->stats_blk = NULL;
660         }
661 }
662
/* Allocate all host memory the driver needs:
 *
 *   - one combined DMA allocation holding the status block(s) and the
 *     statistics block,
 *   - on 5709-family chips, the host-resident context memory pages,
 *   - the rx and tx rings (via bnx2_alloc_rx_mem/bnx2_alloc_tx_mem).
 *
 * Returns 0 on success or -ENOMEM; on any failure everything
 * allocated so far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        /* With MSI-X capability, reserve one aligned sub-block per
         * hardware vector instead of a single status block.
         */
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 always uses the base status block. */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* Vectors 1..N each get their own aligned MSI-X
                 * sub-block within the same allocation.
                 */
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        /* Vector number in the high byte; OR'ed into
                         * INT_ACK_CMD writes (see bnx2_enable_int).
                         */
                        bnapi->int_num = i << 24;
                }
        }

        /* The statistics block follows the status block(s). */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 keeps 8KB (0x2000) of context memory in host
                 * pages, at least one page.
                 */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
739
/* Report the current link state (speed, duplex, autoneg result) to
 * the bootcode/management firmware via the BNX2_LINK_STATUS shared
 * memory field.  Skipped entirely when the PHY is managed remotely
 * (REMOTE_PHY_CAP).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode the negotiated speed/duplex combination. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* Read BMSR twice: MII latches link-down
                         * events, so the first read may return stale
                         * (latched) status.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
798
799 static char *
800 bnx2_xceiver_str(struct bnx2 *bp)
801 {
802         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
803                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
804                  "Copper"));
805 }
806
/* Log the link state transition, toggle the netdev carrier
 * accordingly, and forward the state to the firmware.
 *
 * The message is built from several continuation printk() calls (no
 * KERN_ level on the follow-ups), so the statement order here is
 * load-bearing for the log output.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                /* Flow control: RX implies "receive", RX|TX prints
                 * "receive & transmit", TX alone prints "transmit".
                 */
                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        /* Keep the management firmware's view in sync. */
        bnx2_report_fw_link(bp);
}
843
/* Resolve the flow control (pause) configuration after a link change
 * and store the result in bp->flow_ctrl (FLOW_CTRL_RX/FLOW_CTRL_TX bits).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If pause was not autonegotiated, apply the forced setting.
	 * Pause is only meaningful in full duplex.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the negotiated pause result
	 * directly in its 1000X status register, so no resolution
	 * from the advertisement registers is needed.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* For SerDes, translate the 1000X pause bits into the copper
	 * (ADVERTISE_PAUSE_*) encoding so the shared resolution logic
	 * below can be used for both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
919
/* Establish line speed and duplex for a 5709 SerDes PHY that has just
 * linked up, by reading the GP_STATUS block.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Speed/duplex status lives in the GP_STATUS register block;
	 * select it, read, then restore the default COMBO_IEEEB0 block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report the requested settings directly. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
958
/* Establish line speed and duplex for a 5708 SerDes PHY that has just
 * linked up, from its 1000X status register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
987
/* Establish line speed and duplex for a 5706 SerDes PHY that has just
 * linked up.  The line speed is fixed at 1000 Mbps; duplex comes from
 * the forced BMCR setting or, with autoneg, from the common 1000X
 * abilities.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: the BMCR duplex bit is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	/* Autoneg: resolve duplex from abilities both sides advertise. */
	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1024
/* Establish line speed and duplex for a copper PHY that has just
 * linked up, checking 1000BASE-T abilities first, then 100/10.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The link partner's 1000BASE-T ability bits in STAT1000
		 * sit two bit positions above the local advertisement
		 * bits in CTRL1000 (e.g. 0x0800 vs 0x0200); shift right
		 * by 2 to align them before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to 100/10 abilities. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: treat link as down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1090
/* Program the L2 context type for an rx ring context; on 5709 also set
 * the pause watermarks derived from the rx ring size and flow control.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low watermark enables pause generation only when we
		 * are actually doing tx flow control.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* Clamp to the 4-bit field; a zero high watermark also
		 * disables the low watermark.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1126
1127 static void
1128 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1129 {
1130         int i;
1131         u32 cid;
1132
1133         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1134                 if (i == 1)
1135                         cid = RX_RSS_CID;
1136                 bnx2_init_rx_context(bp, cid);
1137         }
1138 }
1139
/* Program the EMAC to match the current link state: inter-packet gap,
 * port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables; then ack
 * the link-change interrupt.  On 5709, also refresh the rx contexts so
 * the pause watermarks track the new flow control setting.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default tx inter-packet gap; 1G half duplex needs a larger one. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; it uses
				 * the plain MII setting below.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1207
1208 static void
1209 bnx2_enable_bmsr1(struct bnx2 *bp)
1210 {
1211         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1212             (CHIP_NUM(bp) == CHIP_NUM_5709))
1213                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1214                                MII_BNX2_BLK_ADDR_GP_STATUS);
1215 }
1216
1217 static void
1218 bnx2_disable_bmsr1(struct bnx2 *bp)
1219 {
1220         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1221             (CHIP_NUM(bp) == CHIP_NUM_5709))
1222                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1223                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1224 }
1225
/* Enable 2.5G advertisement in the PHY's UP1 register if not already
 * set.  Returns 1 if 2.5G was already enabled, 0 if it had to be
 * enabled just now (or if the PHY is not 2.5G capable).
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1254
/* Disable 2.5G advertisement in the PHY's UP1 register if currently
 * set.  Returns 1 if the bit was set and has been cleared, 0 otherwise
 * (including when the PHY is not 2.5G capable).
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1280
1281 static void
1282 bnx2_enable_forced_2g5(struct bnx2 *bp)
1283 {
1284         u32 bmcr;
1285
1286         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1287                 return;
1288
1289         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1290                 u32 val;
1291
1292                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1293                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1294                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1295                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1296                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1297                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1298
1299                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1300                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1301                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1302
1303         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1304                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1305                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1306         }
1307
1308         if (bp->autoneg & AUTONEG_SPEED) {
1309                 bmcr &= ~BMCR_ANENABLE;
1310                 if (bp->req_duplex == DUPLEX_FULL)
1311                         bmcr |= BMCR_FULLDPLX;
1312         }
1313         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1314 }
1315
1316 static void
1317 bnx2_disable_forced_2g5(struct bnx2 *bp)
1318 {
1319         u32 bmcr;
1320
1321         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1322                 return;
1323
1324         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1325                 u32 val;
1326
1327                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1328                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1329                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1330                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1331                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1332
1333                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1334                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1335                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1336
1337         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1338                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1339                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1340         }
1341
1342         if (bp->autoneg & AUTONEG_SPEED)
1343                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1344         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1345 }
1346
/* Force the 5706 SerDes link down (start != 0 releases the force) by
 * modifying the expansion SERDES_CTL register through the DSP port.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		/* Clear bits 4-7 to release the forced-down state.
		 * NOTE(review): exact bit semantics come from the
		 * Broadcom register spec — confirm before changing.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		/* Set bits 6-7 to force the link down. */
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1359
/* Re-evaluate the link state from the PHY, update bp->link_up,
 * speed/duplex/flow control, report any state change, and reprogram
 * the MAC.  Called with bp->phy_lock held.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* With a remote (firmware-managed) PHY, link state is handled
	 * elsewhere.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down; read it twice so the second read
	 * reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, cross-check the BMSR link bit against the
	 * EMAC status and the AN debug shadow register (which is also
	 * read twice).
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Dispatch to the chip-specific link-up handler to get
		 * speed and duplex.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting and re-enable
		 * autoneg if we were in parallel-detect mode.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1443
1444 static int
1445 bnx2_reset_phy(struct bnx2 *bp)
1446 {
1447         int i;
1448         u32 reg;
1449
1450         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1451
1452 #define PHY_RESET_MAX_WAIT 100
1453         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1454                 udelay(10);
1455
1456                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1457                 if (!(reg & BMCR_RESET)) {
1458                         udelay(20);
1459                         break;
1460                 }
1461         }
1462         if (i == PHY_RESET_MAX_WAIT) {
1463                 return -EBUSY;
1464         }
1465         return 0;
1466 }
1467
1468 static u32
1469 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1470 {
1471         u32 adv = 0;
1472
1473         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1474                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1475
1476                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1477                         adv = ADVERTISE_1000XPAUSE;
1478                 }
1479                 else {
1480                         adv = ADVERTISE_PAUSE_CAP;
1481                 }
1482         }
1483         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1484                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1485                         adv = ADVERTISE_1000XPSE_ASYM;
1486                 }
1487                 else {
1488                         adv = ADVERTISE_PAUSE_ASYM;
1489                 }
1490         }
1491         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1492                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1493                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1494                 }
1495                 else {
1496                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1497                 }
1498         }
1499         return adv;
1500 }
1501
1502 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1503
/* Configure link settings on a firmware-managed (remote) PHY by
 * encoding the requested speed/duplex/pause into a netlink-style
 * argument word and issuing a SET_LINK firmware command.  Drops and
 * re-acquires bp->phy_lock around the firmware sync.  Returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every requested speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested speed/duplex.
		 * 1G and 2.5G are full duplex only.
		 */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware handshake may sleep; it cannot run under the
	 * phy_lock spinlock.
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1562
1563 static int
1564 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1565 __releases(&bp->phy_lock)
1566 __acquires(&bp->phy_lock)
1567 {
1568         u32 adv, bmcr;
1569         u32 new_adv = 0;
1570
1571         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1572                 return (bnx2_setup_remote_phy(bp, port));
1573
1574         if (!(bp->autoneg & AUTONEG_SPEED)) {
1575                 u32 new_bmcr;
1576                 int force_link_down = 0;
1577
1578                 if (bp->req_line_speed == SPEED_2500) {
1579                         if (!bnx2_test_and_enable_2g5(bp))
1580                                 force_link_down = 1;
1581                 } else if (bp->req_line_speed == SPEED_1000) {
1582                         if (bnx2_test_and_disable_2g5(bp))
1583                                 force_link_down = 1;
1584                 }
1585                 bnx2_read_phy(bp, bp->mii_adv, &adv);
1586                 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1587
1588                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1589                 new_bmcr = bmcr & ~BMCR_ANENABLE;
1590                 new_bmcr |= BMCR_SPEED1000;
1591
1592                 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1593                         if (bp->req_line_speed == SPEED_2500)
1594                                 bnx2_enable_forced_2g5(bp);
1595                         else if (bp->req_line_speed == SPEED_1000) {
1596                                 bnx2_disable_forced_2g5(bp);
1597                                 new_bmcr &= ~0x2000;
1598                         }
1599
1600                 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1601                         if (bp->req_line_speed == SPEED_2500)
1602                                 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1603                         else
1604                                 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1605                 }
1606
1607                 if (bp->req_duplex == DUPLEX_FULL) {
1608                         adv |= ADVERTISE_1000XFULL;
1609                         new_bmcr |= BMCR_FULLDPLX;
1610                 }
1611                 else {
1612                         adv |= ADVERTISE_1000XHALF;
1613                         new_bmcr &= ~BMCR_FULLDPLX;
1614                 }
1615                 if ((new_bmcr != bmcr) || (force_link_down)) {
1616                         /* Force a link down visible on the other side */
1617                         if (bp->link_up) {
1618                                 bnx2_write_phy(bp, bp->mii_adv, adv &
1619                                                ~(ADVERTISE_1000XFULL |
1620                                                  ADVERTISE_1000XHALF));
1621                                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1622                                         BMCR_ANRESTART | BMCR_ANENABLE);
1623
1624                                 bp->link_up = 0;
1625                                 netif_carrier_off(bp->dev);
1626                                 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1627                                 bnx2_report_link(bp);
1628                         }
1629                         bnx2_write_phy(bp, bp->mii_adv, adv);
1630                         bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1631                 } else {
1632                         bnx2_resolve_flow_ctrl(bp);
1633                         bnx2_set_mac_link(bp);
1634                 }
1635                 return 0;
1636         }
1637
1638         bnx2_test_and_enable_2g5(bp);
1639
1640         if (bp->advertising & ADVERTISED_1000baseT_Full)
1641                 new_adv |= ADVERTISE_1000XFULL;
1642
1643         new_adv |= bnx2_phy_get_pause_adv(bp);
1644
1645         bnx2_read_phy(bp, bp->mii_adv, &adv);
1646         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1647
1648         bp->serdes_an_pending = 0;
1649         if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1650                 /* Force a link down visible on the other side */
1651                 if (bp->link_up) {
1652                         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1653                         spin_unlock_bh(&bp->phy_lock);
1654                         msleep(20);
1655                         spin_lock_bh(&bp->phy_lock);
1656                 }
1657
1658                 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1659                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1660                         BMCR_ANENABLE);
1661                 /* Speed up link-up time when the link partner
1662                  * does not autonegotiate which is very common
1663                  * in blade servers. Some blade servers use
1664                  * IPMI for keyboard input and it's important
1665                  * to minimize link disruptions. Autoneg. involves
1666                  * exchanging base pages plus 3 next pages and
1667                  * normally completes in about 120 msec.
1668                  */
1669                 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1670                 bp->serdes_an_pending = 1;
1671                 mod_timer(&bp->timer, jiffies + bp->current_interval);
1672         } else {
1673                 bnx2_resolve_flow_ctrl(bp);
1674                 bnx2_set_mac_link(bp);
1675         }
1676
1677         return 0;
1678 }
1679
/* Advertisement masks used when building the default "advertise
 * everything" link settings below.
 */

/* All fibre speeds the board supports; 2.5G is included only when the
 * PHY is flagged 2.5G-capable.  Expands using the local variable "bp".
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* All copper speeds: 10/100 half+full plus 1000 full (no 1000 half). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement register bits for every 10/100 mode + CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits for both 1000 modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1694
1695 static void
1696 bnx2_set_default_remote_link(struct bnx2 *bp)
1697 {
1698         u32 link;
1699
1700         if (bp->phy_port == PORT_TP)
1701                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1702         else
1703                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1704
1705         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1706                 bp->req_line_speed = 0;
1707                 bp->autoneg |= AUTONEG_SPEED;
1708                 bp->advertising = ADVERTISED_Autoneg;
1709                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1710                         bp->advertising |= ADVERTISED_10baseT_Half;
1711                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1712                         bp->advertising |= ADVERTISED_10baseT_Full;
1713                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1714                         bp->advertising |= ADVERTISED_100baseT_Half;
1715                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1716                         bp->advertising |= ADVERTISED_100baseT_Full;
1717                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1718                         bp->advertising |= ADVERTISED_1000baseT_Full;
1719                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1720                         bp->advertising |= ADVERTISED_2500baseX_Full;
1721         } else {
1722                 bp->autoneg = 0;
1723                 bp->advertising = 0;
1724                 bp->req_duplex = DUPLEX_FULL;
1725                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1726                         bp->req_line_speed = SPEED_10;
1727                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1728                                 bp->req_duplex = DUPLEX_HALF;
1729                 }
1730                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1731                         bp->req_line_speed = SPEED_100;
1732                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1733                                 bp->req_duplex = DUPLEX_HALF;
1734                 }
1735                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1736                         bp->req_line_speed = SPEED_1000;
1737                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1738                         bp->req_line_speed = SPEED_2500;
1739         }
1740 }
1741
1742 static void
1743 bnx2_set_default_link(struct bnx2 *bp)
1744 {
1745         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1746                 bnx2_set_default_remote_link(bp);
1747                 return;
1748         }
1749
1750         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1751         bp->req_line_speed = 0;
1752         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1753                 u32 reg;
1754
1755                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1756
1757                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1758                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1759                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1760                         bp->autoneg = 0;
1761                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1762                         bp->req_duplex = DUPLEX_FULL;
1763                 }
1764         } else
1765                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1766 }
1767
1768 static void
1769 bnx2_send_heart_beat(struct bnx2 *bp)
1770 {
1771         u32 msg;
1772         u32 addr;
1773
1774         spin_lock(&bp->indirect_lock);
1775         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1776         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1777         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1778         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1779         spin_unlock(&bp->indirect_lock);
1780 }
1781
/* Handle a link-status event reported by the remote (firmware-managed)
 * PHY through shared memory.  Decodes BNX2_LINK_STATUS into
 * bp->link_up/line_speed/duplex/flow_ctrl/phy_port, reports a link
 * change if one occurred, and reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;       /* old state, to detect a change */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        /* Firmware requests a heartbeat ack via this status bit. */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each half-duplex case sets the duplex and then falls
                 * through to the matching full-duplex case to pick up
                 * the line speed.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                /* Flow control: forced unless both speed and flow
                 * control are autonegotiated; in the autoneg case take
                 * the result the firmware reports.
                 */
                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* A port-type change requires new default link settings. */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
1858
1859 static int
1860 bnx2_set_remote_link(struct bnx2 *bp)
1861 {
1862         u32 evt_code;
1863
1864         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1865         switch (evt_code) {
1866                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1867                         bnx2_remote_phy_event(bp);
1868                         break;
1869                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1870                 default:
1871                         bnx2_send_heart_beat(bp);
1872                         break;
1873         }
1874         return 0;
1875 }
1876
/* Configure the copper PHY from bp->autoneg and the requested
 * speed/duplex.  Called with bp->phy_lock held; the lock is dropped
 * around the forced-link-down sleep (hence the __releases/__acquires
 * annotations).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Mask the current advertisements down to the bits this
                 * driver manages so the comparison below is meaningful.
                 */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Build the desired advertisement from bp->advertising. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Only rewrite the registers and restart autoneg when
                 * something actually changed or autoneg was disabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* Read BMSR twice: its link-status bit is latched, so
                 * the second read reflects the current state.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        /* Drop phy_lock while sleeping for link down. */
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
1975
1976 static int
1977 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1978 __releases(&bp->phy_lock)
1979 __acquires(&bp->phy_lock)
1980 {
1981         if (bp->loopback == MAC_LOOPBACK)
1982                 return 0;
1983
1984         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1985                 return (bnx2_setup_serdes_phy(bp, port));
1986         }
1987         else {
1988                 return (bnx2_setup_copper_phy(bp));
1989         }
1990 }
1991
/* Initialize the 5709 SerDes PHY.  This PHY exposes its registers
 * through a banked scheme: each step first selects a register block via
 * MII_BNX2_BLK_ADDR, and the standard MII registers live at an offset
 * of +0x10 within the combined IEEE block.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        /* Point the generic MII accessors at the 5709's register map. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Route register accesses to the autoneg MMD via the AER block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        /* Force fiber mode instead of media auto-detection. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Advertise 2.5G only when the PHY is 2.5G-capable. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable BAM and teton-2 next-page exchange. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        /* Clause 73 BAM configuration. */
        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the block address pointing at the IEEE registers. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2041
/* Initialize the 5708 SerDes PHY: fiber mode with auto-detect, PLL
 * early-detect, optional 2.5G advertisement, a TX amplitude adjustment
 * on A0/B0/B1 silicon, and an optional TX control value from shared
 * memory for backplane configurations.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* 2.5G advertisement register for this PHY. */
        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        /* Fiber mode with auto-detection enabled. */
        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Apply a board-specific TX control value from NVRAM config,
         * but only on backplane designs.
         */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2099
/* Initialize the 5706 SerDes PHY.  Programs vendor-specific shadow
 * registers 0x18/0x1c differently depending on whether a jumbo MTU
 * (> 1500) is configured.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Clear the extended packet length configuration. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2137
/* Initialize the copper PHY: apply the CRC and early-DAC workarounds
 * when flagged, configure extended packet length for jumbo MTU, and
 * enable ethernet@wirespeed.  The 0x15/0x17/0x18 register writes are
 * vendor-specific shadow-register sequences.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* DSP write sequence applied when the CRC-fix flag is set. */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear the extended packet length configuration. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2189
2190
/* (Re)initialize the PHY for this chip and then apply the current link
 * settings.  Called with bp->phy_lock held; bnx2_setup_phy() may drop
 * and re-acquire it (hence __releases/__acquires).  Returns 0 or the
 * error from the chip-specific init / setup routine.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 val;
        int rc = 0;

        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

        /* Default MII register map; chip-specific init routines below
         * may override these (e.g. the 5709 SerDes map).
         */
        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* A remote (firmware-managed) PHY needs no local probing. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                goto setup_phy;

        /* Assemble the 32-bit PHY id from the two MII id registers. */
        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp, reset_phy);
        }
        else {
                rc = bnx2_init_copper_phy(bp, reset_phy);
        }

setup_phy:
        if (!rc)
                rc = bnx2_setup_phy(bp, bp->phy_port);

        return rc;
}
2236
2237 static int
2238 bnx2_set_mac_loopback(struct bnx2 *bp)
2239 {
2240         u32 mac_mode;
2241
2242         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2243         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2244         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2245         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2246         bp->link_up = 1;
2247         return 0;
2248 }
2249
/* Forward declaration; used by bnx2_set_phy_loopback() below. */
static int bnx2_test_link(struct bnx2 *);
2251
/* Put the PHY into loopback at 1000/full and force the MAC into GMII
 * mode, polling up to ~1 second for the looped-back link to come up.
 * Returns 0 on success or the error from the PHY write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        /* PHY accesses require phy_lock; drop it before sleeping below. */
        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        /* Wait (10 x 100ms max) for the loopback link to be reported. */
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        /* Clear any loop/force/speed overrides and select GMII. */
        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
2281
/* Post a message to the firmware mailbox and optionally wait for an
 * acknowledgement.  Each message carries an incrementing sequence
 * number (bp->fw_wr_seq) so acks can be matched to requests.
 *
 * Returns 0 on success (or when no ack was requested), -EBUSY on ack
 * timeout (after informing the firmware of the timeout), or -EIO when
 * the firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 requests report success regardless of ack result or
         * firmware status.
         */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2327
/* Enable the 5709 context memory and program the host page table with
 * the DMA addresses of the driver's pre-allocated context blocks.
 * Every command / page-table write is polled until the hardware clears
 * the corresponding busy bit.
 *
 * Returns 0 on success, -EBUSY on a hardware poll timeout, or -ENOMEM
 * if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* Enable context memory and start its internal initialization;
         * the page-size field is encoded relative to 256 bytes.
         */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                /* Clear the host-side page before publishing it. */
                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Write the 64-bit bus address, then trigger the
                 * page-table update and poll for completion.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2375
/* Zero the on-chip context memory for all 96 CIDs (pre-5709 chips).
 * On 5706 A0 silicon, CIDs with bit 3 set map to a different physical
 * CID, so the physical address is remapped before zeroing.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        /* 5706 A0: remap CIDs with bit 3 set into the
                         * 0x60+ range; others map 1:1.
                         */
                        vcid_addr = GET_PCID_ADDR(vcid);
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* A context spans several physical pages; map and clear
                 * each one through the context window.
                 */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
2418
/* Work around bad RX buffer memory blocks in the chip's internal mbuf
 * pool: allocate every free mbuf from the firmware, keep the handles of
 * the good ones, then free only the good ones back, permanently leaking
 * the bad blocks out of the pool.  Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Scratch array of good mbuf handles.  NOTE(review): the 512
	 * bound presumably matches the maximum pool size on these chips
	 * -- confirm against the hardware documentation.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	/* Enable the RX mbuf unit before talking to the RBUF block. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encoding expected by the FW_BUF_FREE register: handle in
		 * both halves plus a valid bit.  NOTE(review): format taken
		 * from this code; confirm against register documentation.
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2470
2471 static void
2472 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2473 {
2474         u32 val;
2475
2476         val = (mac_addr[0] << 8) | mac_addr[1];
2477
2478         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2479
2480         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2481                 (mac_addr[4] << 8) | mac_addr[5];
2482
2483         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2484 }
2485
/* Allocate a fresh page for slot @index of the RX page ring, map it for
 * device DMA and publish the bus address in the matching rx_bd
 * descriptor.  Returns 0 on success, -ENOMEM if the page allocation
 * fails, or -EIO if the DMA mapping fails.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	/* GFP_ATOMIC: may be called from NAPI (softirq) context. */
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	/* Remember the mapping so it can be unmapped later. */
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* The chip takes the 64-bit bus address as two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2510
2511 static void
2512 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2513 {
2514         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2515         struct page *page = rx_pg->page;
2516
2517         if (!page)
2518                 return;
2519
2520         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2521                        PCI_DMA_FROMDEVICE);
2522
2523         __free_page(page);
2524         rx_pg->page = NULL;
2525 }
2526
/* Allocate and DMA-map a new receive skb for slot @index of the RX
 * buffer ring, publish its bus address in the matching rx_bd, and
 * advance the producer byte-sequence counter.  Returns 0 on success,
 * -ENOMEM or -EIO on allocation/mapping failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary for the chip. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Publish the 64-bit bus address as two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Tell the chip how many more bytes of buffer are available. */
	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2561
2562 static int
2563 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2564 {
2565         struct status_block *sblk = bnapi->status_blk.msi;
2566         u32 new_link_state, old_link_state;
2567         int is_set = 1;
2568
2569         new_link_state = sblk->status_attn_bits & event;
2570         old_link_state = sblk->status_attn_bits_ack & event;
2571         if (new_link_state != old_link_state) {
2572                 if (new_link_state)
2573                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2574                 else
2575                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2576         } else
2577                 is_set = 0;
2578
2579         return is_set;
2580 }
2581
/* Service PHY attention events: under phy_lock, update the local link
 * state and/or the remote-PHY link state depending on which attention
 * bits have changed.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2595
/* Read the hardware TX consumer index from the status block.  The last
 * entry of each ring page is unusable, so when the raw index lands on
 * it, step past it to keep software and hardware indices in sync.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	/* Skip the unusable last descriptor of a ring page. */
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2609
/* Reclaim TX descriptors completed by the hardware on this NAPI
 * instance's ring: unmap and free each finished skb, advance the
 * software consumer index, and wake the TX queue if it was stopped and
 * enough descriptors are free again.  Returns the number of packets
 * reclaimed (bounded by @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index just past the last BD of this packet,
			 * accounting for the unusable last entry of a
			 * ring page.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Wrap-safe signed compare: stop if the hardware
			 * has not yet consumed all BDs of this packet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Step over the fragment BDs of this packet. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons to pick up completions posted while
		 * we were reclaiming.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked wake: re-test under the TX lock to avoid racing
	 * with a concurrent bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2692
/* Recycle @count pages from the RX page ring consumer back to the
 * producer side instead of allocating new ones (used on error or
 * allocation-failure paths).  If @skb is non-NULL, its last page frag
 * is first detached and returned to the ring, and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move page, DMA mapping and descriptor address from the
		 * consumer slot to the producer slot (no-op when they
		 * are the same slot).
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2748
/* Give the skb at consumer slot @cons back to the hardware at producer
 * slot @prod instead of passing it up the stack (error/copy paths).
 * Moves the skb, its DMA mapping and the descriptor address, and bumps
 * the producer byte-sequence counter.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the (partially CPU-synced) buffer back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing else to move. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2778
/* Finish a received skb of @len bytes: replenish the buffer ring slot,
 * unmap the skb's data, and -- for split/jumbo frames (@hdr_len != 0) --
 * attach page-ring pages as skb frags.  @ring_idx packs the consumer
 * index (high 16 bits) and producer index (low 16 bits).  On failure
 * the buffer and any needed pages are recycled and an errno returned.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the ring slot before consuming this skb; on failure,
	 * recycle the skb (and pages) instead of delivering it.
	 */
	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* +4: trailing bytes (presumably FCS) included in
			 * the on-wire length -- see the len -= 4 in the
			 * caller.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire frame fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* Only trailer bytes remain: trim them off the skb
			 * and recycle the untouched pages.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Roll back ring indices and recycle the
				 * remaining pages (skb is freed inside).
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2877
/* Read the hardware RX consumer index from the status block, stepping
 * past the unusable last entry of a ring page (mirrors
 * bnx2_get_hw_tx_cons()).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	/* Skip the unusable last descriptor of a ring page. */
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
2891
/* NAPI RX processing: walk the RX ring from the software consumer to the
 * hardware consumer, validate each frame via its l2_fhdr, deliver good
 * frames up the stack (copying small ones, page-chaining jumbo ones),
 * recycle bad ones, then publish the new producer indices to the chip.
 * Returns the number of packets processed (bounded by @budget).
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU; the rest
		 * is synced/unmapped later if the frame is kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status block to the frame. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Recycle frames with any receive error. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the trailing 4 bytes (presumably the CRC). */
		len -= 4;

		/* Small frame: copy into a fresh skb and recycle the big
		 * RX buffer immediately.
		 */
		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		/* VLAN tag handling: hardware-accelerated when a vlan
		 * group is registered, otherwise re-insert the tag into
		 * the packet data by hand.
		 */
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they are 802.1Q tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when enabled and no
		 * checksum errors were flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	/* Order the MMIO writes before releasing the CPU. */
	mmiowb();

	return rx_pkt;

}
3067
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further chip interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3090
3091 static irqreturn_t
3092 bnx2_msi_1shot(int irq, void *dev_instance)
3093 {
3094         struct bnx2_napi *bnapi = dev_instance;
3095         struct bnx2 *bp = bnapi->bp;
3096
3097         prefetch(bnapi->status_blk.msi);
3098
3099         /* Return here if interrupt is disabled. */
3100         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3101                 return IRQ_HANDLED;
3102
3103         napi_schedule(&bnapi->napi);
3104
3105         return IRQ_HANDLED;
3106 }
3107
/* Shared INTx ISR: determine whether this device raised the interrupt,
 * mask and deassert it, and schedule NAPI.  Returns IRQ_NONE when the
 * interrupt belongs to another device sharing the line.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		/* Record the status index we are servicing before the
		 * poll runs.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3146
3147 static inline int
3148 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3149 {
3150         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3151         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3152
3153         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3154             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3155                 return 1;
3156         return 0;
3157 }
3158
3159 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3160                                  STATUS_ATTN_BITS_TIMER_ABORT)
3161
3162 static inline int
3163 bnx2_has_work(struct bnx2_napi *bnapi)
3164 {
3165         struct status_block *sblk = bnapi->status_blk.msi;
3166
3167         if (bnx2_has_fast_work(bnapi))
3168                 return 1;
3169
3170         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3171             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3172                 return 1;
3173
3174         return 0;
3175 }
3176
/* Workaround for lost MSI: if work is pending but the status index has
 * not advanced since the last idle check, assume the MSI was missed --
 * pulse the MSI enable bit off/on and invoke the MSI handler directly.
 * Called periodically (last_status_idx snapshot kept in
 * bp->idle_chk_status_idx between calls).
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not currently enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable, then service the interrupt
			 * by hand.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3198
/* Check for unacknowledged link/timer attention events during a NAPI
 * poll and service them via bnx2_phy_int(), then kick the host
 * coalescing block so a fresh status block is generated.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3218
/* One NAPI work pass: reclaim completed TX first (TX does not count
 * against the budget -- note the 0 budget passed to bnx2_tx_int), then
 * process RX packets up to the remaining budget.  Returns the updated
 * work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3233
/* NAPI poll handler for MSI-X vectors: loop doing RX/TX work until the
 * budget is exhausted or no fast work remains, then complete NAPI and
 * re-enable this vector's interrupt by acking the last status index.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack the status index and unmask this vector. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3260
/* NAPI poll handler for INTx/MSI: service link events and RX/TX work
 * until the budget is exhausted or no work remains, then complete NAPI
 * and re-enable interrupts.  For INTx the re-enable is done in two
 * writes (first with MASK_INT, then without) -- a chip requirement for
 * shared interrupt lines; the NOTE(review) below hedges the exact
 * rationale, which is not visible here.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: two-step ack, first keeping interrupts
			 * masked, then unmasking.  NOTE(review): presumably
			 * avoids a spurious interrupt window -- confirm
			 * against chip documentation.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3305
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC receive mode and the RPM sort-user filters to match
 * the net_device's promiscuous/allmulti/multicast/unicast configuration.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we recompute cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	/* Bit 0 selects sort-user entry 0; always accept broadcast. */
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Only strip VLAN tags in hw when a vlan group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: saturate every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low 8 CRC bits: top 3 select the
			 * register, bottom 5 select the bit within it.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		/* Too many unicast addresses to match in hw; fall back to
		 * promiscuous reception.
		 */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into the perfect-match filter list. */
		i = 0;
		list_for_each_entry(ha, &dev->uc_list, list) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	/* Only touch the EMAC mode register when something changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3401
3402 static int __devinit
3403 check_fw_section(const struct firmware *fw,
3404                  const struct bnx2_fw_file_section *section,
3405                  u32 alignment, bool non_empty)
3406 {
3407         u32 offset = be32_to_cpu(section->offset);
3408         u32 len = be32_to_cpu(section->len);
3409
3410         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3411                 return -EINVAL;
3412         if ((non_empty && len == 0) || len > fw->size - offset ||
3413             len & (alignment - 1))
3414                 return -EINVAL;
3415         return 0;
3416 }
3417
3418 static int __devinit
3419 check_mips_fw_entry(const struct firmware *fw,
3420                     const struct bnx2_mips_fw_file_entry *entry)
3421 {
3422         if (check_fw_section(fw, &entry->text, 4, true) ||
3423             check_fw_section(fw, &entry->data, 4, false) ||
3424             check_fw_section(fw, &entry->rodata, 4, false))
3425                 return -EINVAL;
3426         return 0;
3427 }
3428
3429 static int __devinit
3430 bnx2_request_firmware(struct bnx2 *bp)
3431 {
3432         const char *mips_fw_file, *rv2p_fw_file;
3433         const struct bnx2_mips_fw_file *mips_fw;
3434         const struct bnx2_rv2p_fw_file *rv2p_fw;
3435         int rc;
3436
3437         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3438                 mips_fw_file = FW_MIPS_FILE_09;
3439                 rv2p_fw_file = FW_RV2P_FILE_09;
3440         } else {
3441                 mips_fw_file = FW_MIPS_FILE_06;
3442                 rv2p_fw_file = FW_RV2P_FILE_06;
3443         }
3444
3445         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3446         if (rc) {
3447                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3448                        mips_fw_file);
3449                 return rc;
3450         }
3451
3452         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3453         if (rc) {
3454                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3455                        rv2p_fw_file);
3456                 return rc;
3457         }
3458         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3459         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3460         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3461             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3462             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3463             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3464             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3465             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3466                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3467                        mips_fw_file);
3468                 return -EINVAL;
3469         }
3470         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3471             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3472             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3473                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3474                        rv2p_fw_file);
3475                 return -EINVAL;
3476         }
3477
3478         return 0;
3479 }
3480
3481 static u32
3482 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3483 {
3484         switch (idx) {
3485         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3486                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3487                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3488                 break;
3489         }
3490         return rv2p_code;
3491 }
3492
/* Load one RV2P processor (PROC1 or PROC2) from its firmware entry,
 * apply the per-image fixups, and leave the processor in reset.
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Pick the command register/opcode for the target processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Each instruction is 8 bytes: stage the high then low word and
	 * commit to instruction slot i/8 via the address/command register.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Second pass: rewrite up to 8 fixup locations.  Each fixup[i] is
	 * a word index into the image; the instruction pair at loc-1/loc
	 * is re-staged with the low word patched by rv2p_fw_fixup().
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			/* loc counts 32-bit words; slots are 64-bit. */
			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3552
3553 static int
3554 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3555             const struct bnx2_mips_fw_file_entry *fw_entry)
3556 {
3557         u32 addr, len, file_offset;
3558         __be32 *data;
3559         u32 offset;
3560         u32 val;
3561
3562         /* Halt the CPU. */
3563         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3564         val |= cpu_reg->mode_value_halt;
3565         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3566         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3567
3568         /* Load the Text area. */
3569         addr = be32_to_cpu(fw_entry->text.addr);
3570         len = be32_to_cpu(fw_entry->text.len);
3571         file_offset = be32_to_cpu(fw_entry->text.offset);
3572         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3573
3574         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3575         if (len) {
3576                 int j;
3577
3578                 for (j = 0; j < (len / 4); j++, offset += 4)
3579                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3580         }
3581
3582         /* Load the Data area. */
3583         addr = be32_to_cpu(fw_entry->data.addr);
3584         len = be32_to_cpu(fw_entry->data.len);
3585         file_offset = be32_to_cpu(fw_entry->data.offset);
3586         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3587
3588         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3589         if (len) {
3590                 int j;
3591
3592                 for (j = 0; j < (len / 4); j++, offset += 4)
3593                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3594         }
3595
3596         /* Load the Read-Only area. */
3597         addr = be32_to_cpu(fw_entry->rodata.addr);
3598         len = be32_to_cpu(fw_entry->rodata.len);
3599         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3600         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3601
3602         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3603         if (len) {
3604                 int j;
3605
3606                 for (j = 0; j < (len / 4); j++, offset += 4)
3607                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3608         }
3609
3610         /* Clear the pre-fetch instruction. */
3611         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3612
3613         val = be32_to_cpu(fw_entry->start_addr);
3614         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3615
3616         /* Start the CPU. */
3617         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3618         val &= ~cpu_reg->mode_value_halt;
3619         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3620         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3621
3622         return 0;
3623 }
3624
3625 static int
3626 bnx2_init_cpus(struct bnx2 *bp)
3627 {
3628         const struct bnx2_mips_fw_file *mips_fw =
3629                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3630         const struct bnx2_rv2p_fw_file *rv2p_fw =
3631                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3632         int rc;
3633
3634         /* Initialize the RV2P processor. */
3635         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3636         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3637
3638         /* Initialize the RX Processor. */
3639         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3640         if (rc)
3641                 goto init_cpu_err;
3642
3643         /* Initialize the TX Processor. */
3644         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3645         if (rc)
3646                 goto init_cpu_err;
3647
3648         /* Initialize the TX Patch-up Processor. */
3649         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3650         if (rc)
3651                 goto init_cpu_err;
3652
3653         /* Initialize the Completion Processor. */
3654         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3655         if (rc)
3656                 goto init_cpu_err;
3657
3658         /* Initialize the Command Processor. */
3659         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3660
3661 init_cpu_err:
3662         return rc;
3663 }
3664
/* Transition the device between PCI power states.
 * D0: wake the chip, clear PME status, and disable the magic/ACPI
 *     packet machinery used for wake-on-LAN.
 * D3hot: optionally arm WOL (force 10/100 autoneg on copper, enable
 *     magic-packet reception and broadcast/multicast sorting), notify
 *     the bootcode, then program the PM control register.
 * Returns 0 on success or -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits (enter D0) and write-1-to-clear the
		 * PME status bit.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packet and stop listening
		 * for magic packets now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so the
			 * link can stay up at low power, restoring the
			 * user's settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort-user 0: accept broadcast and multicast
			 * while suspended (disable, program, enable).
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether WOL is armed. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Program the D3hot state bits.  NOTE(review): on 5706
		 * A0/A1 the state bits are only set when WOL is enabled —
		 * presumably an early-silicon quirk; confirm against the
		 * errata before changing.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3802
3803 static int
3804 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3805 {
3806         u32 val;
3807         int j;
3808
3809         /* Request access to the flash interface. */
3810         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3811         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3812                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3813                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3814                         break;
3815
3816                 udelay(5);
3817         }
3818
3819         if (j >= NVRAM_TIMEOUT_COUNT)
3820                 return -EBUSY;
3821
3822         return 0;
3823 }
3824
3825 static int
3826 bnx2_release_nvram_lock(struct bnx2 *bp)
3827 {
3828         int j;
3829         u32 val;
3830
3831         /* Relinquish nvram interface. */
3832         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3833
3834         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3835                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3836                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3837                         break;
3838
3839                 udelay(5);
3840         }
3841
3842         if (j >= NVRAM_TIMEOUT_COUNT)
3843                 return -EBUSY;
3844
3845         return 0;
3846 }
3847
3848
3849 static int
3850 bnx2_enable_nvram_write(struct bnx2 *bp)
3851 {
3852         u32 val;
3853
3854         val = REG_RD(bp, BNX2_MISC_CFG);
3855         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3856
3857         if (bp->flash_info->flags & BNX2_NV_WREN) {
3858                 int j;
3859
3860                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3861                 REG_WR(bp, BNX2_NVM_COMMAND,
3862                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3863
3864                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3865                         udelay(5);
3866
3867                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3868                         if (val & BNX2_NVM_COMMAND_DONE)
3869                                 break;
3870                 }
3871
3872                 if (j >= NVRAM_TIMEOUT_COUNT)
3873                         return -EBUSY;
3874         }
3875         return 0;
3876 }
3877
3878 static void
3879 bnx2_disable_nvram_write(struct bnx2 *bp)
3880 {
3881         u32 val;
3882
3883         val = REG_RD(bp, BNX2_MISC_CFG);
3884         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3885 }
3886
3887
3888 static void
3889 bnx2_enable_nvram_access(struct bnx2 *bp)
3890 {
3891         u32 val;
3892
3893         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3894         /* Enable both bits, even on read. */
3895         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3896                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3897 }
3898
3899 static void
3900 bnx2_disable_nvram_access(struct bnx2 *bp)
3901 {
3902         u32 val;
3903
3904         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3905         /* Disable both bits, even after read. */
3906         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3907                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3908                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3909 }
3910
3911 static int
3912 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3913 {
3914         u32 cmd;
3915         int j;
3916
3917         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3918                 /* Buffered flash, no erase needed */
3919                 return 0;
3920
3921         /* Build an erase command */
3922         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3923               BNX2_NVM_COMMAND_DOIT;
3924
3925         /* Need to clear DONE bit separately. */
3926         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3927
3928         /* Address of the NVRAM to read from. */
3929         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3930
3931         /* Issue an erase command. */
3932         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3933
3934         /* Wait for completion. */
3935         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3936                 u32 val;
3937
3938                 udelay(5);
3939
3940                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3941                 if (val & BNX2_NVM_COMMAND_DONE)
3942                         break;
3943         }
3944
3945         if (j >= NVRAM_TIMEOUT_COUNT)
3946                 return -EBUSY;
3947
3948         return 0;
3949 }
3950
3951 static int
3952 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3953 {
3954         u32 cmd;
3955         int j;
3956
3957         /* Build the command word. */
3958         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3959
3960         /* Calculate an offset of a buffered flash, not needed for 5709. */
3961         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3962                 offset = ((offset / bp->flash_info->page_size) <<
3963                            bp->flash_info->page_bits) +
3964                           (offset % bp->flash_info->page_size);
3965         }
3966
3967         /* Need to clear DONE bit separately. */
3968         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3969
3970         /* Address of the NVRAM to read from. */
3971         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3972
3973         /* Issue a read command. */
3974         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3975
3976         /* Wait for completion. */
3977         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3978                 u32 val;
3979
3980                 udelay(5);
3981
3982                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3983                 if (val & BNX2_NVM_COMMAND_DONE) {
3984                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3985                         memcpy(ret_val, &v, 4);
3986                         break;
3987                 }
3988         }
3989         if (j >= NVRAM_TIMEOUT_COUNT)
3990                 return -EBUSY;
3991
3992         return 0;
3993 }
3994
3995
3996 static int
3997 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3998 {
3999         u32 cmd;
4000         __be32 val32;
4001         int j;
4002
4003         /* Build the command word. */
4004         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4005
4006         /* Calculate an offset of a buffered flash, not needed for 5709. */
4007         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4008                 offset = ((offset / bp->flash_info->page_size) <<
4009                           bp->flash_info->page_bits) +
4010                          (offset % bp->flash_info->page_size);
4011         }
4012
4013         /* Need to clear DONE bit separately. */
4014         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4015
4016         memcpy(&val32, val, 4);
4017
4018         /* Write the data. */
4019         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4020
4021         /* Address of the NVRAM to write to. */
4022         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4023
4024         /* Issue the write command. */
4025         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4026
4027         /* Wait for completion. */
4028         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4029                 udelay(5);
4030
4031                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4032                         break;
4033         }
4034         if (j >= NVRAM_TIMEOUT_COUNT)
4035                 return -EBUSY;
4036
4037         return 0;
4038 }
4039
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.  On non-5709
 * chips the part is found by matching strapping bits in NVM_CFG1
 * against flash_table[], reprogramming the interface if it has not
 * been reconfigured yet.  Returns 0 on success, -ENODEV if the part is
 * unknown, or a lock-acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* The 5709 always uses one known part; skip strap matching. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap encoding. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above leave j == entry_count when nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by shared hw config; fall back to the
	 * table's total size when the field is zero.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4122
4123 static int
4124 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4125                 int buf_size)
4126 {
4127         int rc = 0;
4128         u32 cmd_flags, offset32, len32, extra;
4129
4130         if (buf_size == 0)
4131                 return 0;
4132
4133         /* Request access to the flash interface. */
4134         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4135                 return rc;
4136
4137         /* Enable access to flash interface */
4138         bnx2_enable_nvram_access(bp);
4139
4140         len32 = buf_size;
4141         offset32 = offset;
4142         extra = 0;
4143
4144         cmd_flags = 0;
4145
4146         if (offset32 & 3) {
4147                 u8 buf[4];
4148                 u32 pre_len;
4149
4150                 offset32 &= ~3;
4151                 pre_len = 4 - (offset & 3);
4152
4153                 if (pre_len >= len32) {
4154                         pre_len = len32;
4155                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4156                                     BNX2_NVM_COMMAND_LAST;
4157                 }
4158                 else {
4159                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4160                 }
4161
4162                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4163
4164                 if (rc)
4165                         return rc;
4166
4167                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4168
4169                 offset32 += 4;
4170                 ret_buf += pre_len;
4171                 len32 -= pre_len;
4172         }
4173         if (len32 & 3) {
4174                 extra = 4 - (len32 & 3);
4175                 len32 = (len32 + 4) & ~3;
4176         }
4177
4178         if (len32 == 4) {
4179                 u8 buf[4];
4180
4181                 if (cmd_flags)
4182                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4183                 else
4184                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4185                                     BNX2_NVM_COMMAND_LAST;
4186
4187                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4188
4189                 memcpy(ret_buf, buf, 4 - extra);
4190         }
4191         else if (len32 > 0) {
4192                 u8 buf[4];
4193
4194                 /* Read the first word. */
4195                 if (cmd_flags)
4196                         cmd_flags = 0;
4197                 else
4198                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4199
4200                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4201
4202                 /* Advance to the next dword. */
4203                 offset32 += 4;
4204                 ret_buf += 4;
4205                 len32 -= 4;
4206
4207                 while (len32 > 4 && rc == 0) {
4208                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4209
4210                         /* Advance to the next dword. */
4211                         offset32 += 4;
4212                         ret_buf += 4;
4213                         len32 -= 4;
4214                 }
4215
4216                 if (rc)
4217                         return rc;
4218
4219                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4220                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4221
4222                 memcpy(ret_buf, buf, 4 - extra);
4223         }
4224
4225         /* Disable access to flash interface */
4226         bnx2_disable_nvram_access(bp);
4227
4228         bnx2_release_nvram_lock(bp);
4229
4230         return rc;
4231 }
4232
/* bnx2_nvram_write - write an arbitrary byte range to the NVRAM/flash.
 *
 * @bp:       device context
 * @offset:   starting byte offset in NVRAM (need not be dword aligned)
 * @data_buf: source data
 * @buf_size: number of bytes to write
 *
 * Unaligned head/tail bytes are handled with read-modify-write:
 * the surrounding dwords are read first and merged into a dword
 * aligned bounce buffer.  For non-buffered flash parts each page is
 * read in full, erased, and rewritten around the new data.  Returns
 * 0 on success or a negative error code.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        /* Unaligned start: read the dword containing the first bytes
         * so its leading bytes can be preserved. */
        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        /* Unaligned end: read the dword containing the last bytes
         * so its trailing bytes can be preserved. */
        if (len32 & 3) {
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        /* Merge preserved edge bytes and the caller's data into one
         * dword-aligned buffer. */
        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        /* Non-buffered flash needs a page-sized scratch buffer for the
         * read/erase/rewrite cycle (264 bytes covers the page size). */
        if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        /* Write one flash page per iteration. */
        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write */
                        bnx2_enable_nvram_write(bp);

                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
4412
4413 static void
4414 bnx2_init_fw_cap(struct bnx2 *bp)
4415 {
4416         u32 val, sig = 0;
4417
4418         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4419         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4420
4421         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4422                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4423
4424         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4425         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4426                 return;
4427
4428         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4429                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4430                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4431         }
4432
4433         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4434             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4435                 u32 link;
4436
4437                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4438
4439                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4440                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4441                         bp->phy_port = PORT_FIBRE;
4442                 else
4443                         bp->phy_port = PORT_TP;
4444
4445                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4446                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4447         }
4448
4449         if (netif_running(bp->dev) && sig)
4450                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4451 }
4452
/* bnx2_setup_msix_tbl - point the PCI GRC windows at the MSI-X structures.
 *
 * Switches the GRC window to separate-window mode, then maps window 2
 * to the MSI-X vector table and window 3 to the MSI-X PBA.
 * NOTE(review): window assignments inferred from the register/constant
 * names; confirm against the NetXtreme II register reference.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4461
/* bnx2_reset_chip - soft-reset the controller and resynchronize with
 * the bootcode.
 *
 * @bp:         device context
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware to
 *              describe why the reset is happening
 *
 * Quiesces DMA, handshakes with the firmware before and after the
 * reset, issues the chip-specific reset (the 5709 uses the MISC
 * command register; older chips use PCICFG_MISC_CONFIG), verifies
 * endian configuration, and re-applies per-revision workarounds.
 * Returns 0 on success or a negative error code.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
                      BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709: reset through the MISC command register, then
                 * restore the register window configuration. */
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        printk(KERN_ERR PFX "Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                printk(KERN_ERR PFX "Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
        if (rc)
                return rc;

        /* Re-read firmware capabilities; the reset may have changed the
         * remote-PHY port selection. */
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
        bnx2_init_fw_cap(bp);
        if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
            old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        /* The reset clears the separate-window mapping; redo it. */
        if (bp->flags & BNX2_FLAG_USING_MSIX)
                bnx2_setup_msix_tbl(bp);

        return rc;
}
4567
/* bnx2_init_chip - program the chip after reset: DMA configuration,
 * on-chip CPUs and firmware, context memory, MTU, host-coalescing
 * parameters, and the receive filter.  Finishes by telling the
 * firmware initialization is complete and enabling all blocks.
 * Returns 0 on success or a negative error code.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val, mtu;
        int rc, i;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* DMA byte/word swapping plus read/write channel counts. */
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        val |= (0x2 << 20) | (1 << 11);

        if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        /* 5706 A0 workaround: restrict TX DMA to a single engine. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        /* On PCI-X, disable relaxed ordering in the command register. */
        if (bp->flags & BNX2_FLAG_PCIX) {
                u16 val16;

                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                rc = bnx2_init_5709_context(bp);
                if (rc)
                        return rc;
        } else
                bnx2_init_context(bp);

        /* Load and start the on-chip RISC processors. */
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        /* 5709 A0/A1 workaround: keep the MQ block from halting. */
        if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
                val |= BNX2_MQ_CONFIG_HALT_DIS;

        REG_WR(bp, BNX2_MQ_CONFIG, val);

        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the EMAC backoff generator from the MAC address. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        mtu = bp->dev->mtu;
        val = mtu + ETH_HLEN + ETH_FCS_LEN;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        /* RBUF thresholds are sized for at least a standard frame. */
        if (mtu < 1500)
                mtu = 1500;

        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->bnx2_napi[i].last_status_idx = 0;

        bp->idle_chk_status_idx = 0xffff;

        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* Give the chip the DMA addresses of the status and statistics
         * blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Host-coalescing trip counts and timers for the default vector. */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        if (CHIP_NUM(bp) == CHIP_NUM_5708)
                REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
        else
                REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        /* 5706 A1 workaround: avoid the rx/tx timer modes. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                val = BNX2_HC_CONFIG_COLLECT_STATS;
        else {
                val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
                      BNX2_HC_CONFIG_COLLECT_STATS;
        }

        if (bp->irq_nvecs > 1) {
                REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
                       BNX2_HC_MSIX_BIT_VECTOR_VAL);

                val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
        }

        if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
                val |= BNX2_HC_CONFIG_ONE_SHOT;

        REG_WR(bp, BNX2_HC_CONFIG, val);

        /* Per-vector status-block and coalescing setup for the extra
         * MSI-X vectors (vector 0 was configured above). */
        for (i = 1; i < bp->irq_nvecs; i++) {
                u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
                           BNX2_HC_SB_CONFIG_1;

                REG_WR(bp, base,
                        BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_ONE_SHOT);

                REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
                        (bp->tx_quick_cons_trip_int << 16) |
                         bp->tx_quick_cons_trip);

                REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
                        (bp->tx_ticks_int << 16) | bp->tx_ticks);

                REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
                       (bp->rx_quick_cons_trip_int << 16) |
                        bp->rx_quick_cons_trip);

                REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
                        (bp->rx_ticks_int << 16) | bp->rx_ticks);
        }

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
        }
        /* Tell the firmware initialization is done. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          1, 0);

        /* Enable the remaining blocks; read back to flush the posted
         * write before the delay. */
        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
4788
4789 static void
4790 bnx2_clear_ring_states(struct bnx2 *bp)
4791 {
4792         struct bnx2_napi *bnapi;
4793         struct bnx2_tx_ring_info *txr;
4794         struct bnx2_rx_ring_info *rxr;
4795         int i;
4796
4797         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4798                 bnapi = &bp->bnx2_napi[i];
4799                 txr = &bnapi->tx_ring;
4800                 rxr = &bnapi->rx_ring;
4801
4802                 txr->tx_cons = 0;
4803                 txr->hw_tx_cons = 0;
4804                 rxr->rx_prod_bseq = 0;
4805                 rxr->rx_prod = 0;
4806                 rxr->rx_cons = 0;
4807                 rxr->rx_pg_prod = 0;
4808                 rxr->rx_pg_cons = 0;
4809         }
4810 }
4811
4812 static void
4813 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4814 {
4815         u32 val, offset0, offset1, offset2, offset3;
4816         u32 cid_addr = GET_CID_ADDR(cid);
4817
4818         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4819                 offset0 = BNX2_L2CTX_TYPE_XI;
4820                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4821                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4822                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4823         } else {
4824                 offset0 = BNX2_L2CTX_TYPE;
4825                 offset1 = BNX2_L2CTX_CMD_TYPE;
4826                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4827                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4828         }
4829         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4830         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4831
4832         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4833         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4834
4835         val = (u64) txr->tx_desc_mapping >> 32;
4836         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4837
4838         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4839         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4840 }
4841
4842 static void
4843 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4844 {
4845         struct tx_bd *txbd;
4846         u32 cid = TX_CID;
4847         struct bnx2_napi *bnapi;
4848         struct bnx2_tx_ring_info *txr;
4849
4850         bnapi = &bp->bnx2_napi[ring_num];
4851         txr = &bnapi->tx_ring;
4852
4853         if (ring_num == 0)
4854                 cid = TX_CID;
4855         else
4856                 cid = TX_TSS_CID + ring_num - 1;
4857
4858         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4859
4860         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4861
4862         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4863         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4864
4865         txr->tx_prod = 0;
4866         txr->tx_prod_bseq = 0;
4867
4868         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4869         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4870
4871         bnx2_init_tx_context(bp, cid, txr);
4872 }
4873
4874 static void
4875 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4876                      int num_rings)
4877 {
4878         int i;
4879         struct rx_bd *rxbd;
4880
4881         for (i = 0; i < num_rings; i++) {
4882                 int j;
4883
4884                 rxbd = &rx_ring[i][0];
4885                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4886                         rxbd->rx_bd_len = buf_size;
4887                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4888                 }
4889                 if (i == (num_rings - 1))
4890                         j = 0;
4891                 else
4892                         j = i + 1;
4893                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4894                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4895         }
4896 }
4897
/* bnx2_init_rx_ring - set up one rx ring: BD chains, chip context,
 * optional page ring for jumbo frames, and initial buffer population.
 *
 * @bp:       device context
 * @ring_num: rx ring index; ring 0 uses RX_CID, additional RSS rings
 *            use consecutive CIDs starting at RX_RSS_CID
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
        int i;
        u16 prod, ring_prod;
        u32 cid, rx_cid_addr, val;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        if (ring_num == 0)
                cid = RX_CID;
        else
                cid = RX_RSS_CID + ring_num - 1;

        rx_cid_addr = GET_CID_ADDR(cid);

        bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
                             bp->rx_buf_use_size, bp->rx_max_ring);

        bnx2_init_rx_context(bp, cid);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
                REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
        }

        /* Clear the page-buffer size first; it is re-programmed below
         * only when the page ring is in use. */
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
        if (bp->rx_pg_ring_size) {
                /* Jumbo-frame page ring: separate BD chain holding
                 * page-sized buffers. */
                bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
                                     rxr->rx_pg_desc_mapping,
                                     PAGE_SIZE, bp->rx_max_pg_ring);
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
                       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

                val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

                val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

                if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
        }

        /* Give the chip the DMA address of the first rx BD page. */
        val = (u64) rxr->rx_desc_mapping[0] >> 32;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-fill the page ring; stop early on allocation failure. */
        ring_prod = prod = rxr->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
                if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_PG_RING_IDX(prod);
        }
        rxr->rx_pg_prod = prod;

        /* Pre-fill the rx ring with skbs; stop early on failure. */
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        rxr->rx_prod = prod;

        /* Mailbox addresses for producer indices and byte sequence. */
        rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
        rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
        rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

        /* Tell the chip how many buffers were posted. */
        REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
        REG_WR16(bp, rxr->rx_bidx_addr, prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4977
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	/* (Re)initialize every TX and RX ring and, when more than one
	 * ring is in use, program the TSS/RSS steering configuration.
	 */

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are being set up. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are being set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the RSS indirection table, distributing entries
		 * round-robin over the RSS rings.  Entries are packed
		 * four per 32-bit word and written (big-endian) once a
		 * word is complete.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		/* Enable RSS hashing for all IPv4 and IPv6 traffic types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5022
5023 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5024 {
5025         u32 max, num_rings = 1;
5026
5027         while (ring_size > MAX_RX_DESC_CNT) {
5028                 ring_size -= MAX_RX_DESC_CNT;
5029                 num_rings++;
5030         }
5031         /* round to next power of 2 */
5032         max = max_size;
5033         while ((max & num_rings) == 0)
5034                 max >>= 1;
5035
5036         if (num_rings != max)
5037                 max <<= 1;
5038
5039         return max;
5040 }
5041
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* Derive all RX buffer/ring sizing fields in bp from the
	 * requested descriptor count and the current MTU.  When the
	 * per-packet buffer would exceed a page, split reception into
	 * a small linear buffer plus a page ring for the remainder.
	 */

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint: payload + alignment + skb overhead. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame beyond the linear part.
		 * NOTE(review): the 40 presumably accounts for bytes kept
		 * in the linear buffer (headers) -- confirm against the
		 * jumbo RX path.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Linear buffer shrinks to headers only; disable the
		 * copybreak optimization in jumbo mode.
		 */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5080
5081 static void
5082 bnx2_free_tx_skbs(struct bnx2 *bp)
5083 {
5084         int i;
5085
5086         for (i = 0; i < bp->num_tx_rings; i++) {
5087                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5088                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5089                 int j;
5090
5091                 if (txr->tx_buf_ring == NULL)
5092                         continue;
5093
5094                 for (j = 0; j < TX_DESC_CNT; ) {
5095                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5096                         struct sk_buff *skb = tx_buf->skb;
5097
5098                         if (skb == NULL) {
5099                                 j++;
5100                                 continue;
5101                         }
5102
5103                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5104
5105                         tx_buf->skb = NULL;
5106
5107                         j += skb_shinfo(skb)->nr_frags + 1;
5108                         dev_kfree_skb(skb);
5109                 }
5110         }
5111 }
5112
5113 static void
5114 bnx2_free_rx_skbs(struct bnx2 *bp)
5115 {
5116         int i;
5117
5118         for (i = 0; i < bp->num_rx_rings; i++) {
5119                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5120                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5121                 int j;
5122
5123                 if (rxr->rx_buf_ring == NULL)
5124                         return;
5125
5126                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5127                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5128                         struct sk_buff *skb = rx_buf->skb;
5129
5130                         if (skb == NULL)
5131                                 continue;
5132
5133                         pci_unmap_single(bp->pdev,
5134                                          pci_unmap_addr(rx_buf, mapping),
5135                                          bp->rx_buf_use_size,
5136                                          PCI_DMA_FROMDEVICE);
5137
5138                         rx_buf->skb = NULL;
5139
5140                         dev_kfree_skb(skb);
5141                 }
5142                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5143                         bnx2_free_rx_page(bp, rxr, j);
5144         }
5145 }
5146
/* Free all socket buffers held by the driver, TX rings first. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5153
5154 static int
5155 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5156 {
5157         int rc;
5158
5159         rc = bnx2_reset_chip(bp, reset_code);
5160         bnx2_free_skbs(bp);
5161         if (rc)
5162                 return rc;
5163
5164         if ((rc = bnx2_init_chip(bp)) != 0)
5165                 return rc;
5166
5167         bnx2_init_all_rings(bp);
5168         return 0;
5169 }
5170
/* Full NIC (re)initialization: reset the chip, then bring up the PHY
 * and link state under the phy_lock.  Returns 0 or a negative error
 * from the reset step.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* With a firmware-managed (remote) PHY, pick up any link event
	 * that occurred during the reset.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5187
5188 static int
5189 bnx2_shutdown_chip(struct bnx2 *bp)
5190 {
5191         u32 reset_code;
5192
5193         if (bp->flags & BNX2_FLAG_NO_WOL)
5194                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5195         else if (bp->wol)
5196                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5197         else
5198                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5199
5200         return bnx2_reset_chip(bp, reset_code);
5201 }
5202
/* Ethtool self-test: walk a table of registers, checking that the
 * writable bits (rw_mask) accept both 0 and 1 and that the read-only
 * bits (ro_mask) are unaffected by writes.  Each register is restored
 * afterwards.  Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;	/* BNX2_FL_NOT_5709: skip entry on 5709 */
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;	/* bits that must be read-write */
		u32   ro_mask;	/* bits that must be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: all rw bits must clear, ro bits must hold. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must set, ro bits must hold. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original register contents. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5373
5374 static int
5375 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5376 {
5377         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5378                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5379         int i;
5380
5381         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5382                 u32 offset;
5383
5384                 for (offset = 0; offset < size; offset += 4) {
5385
5386                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5387
5388                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5389                                 test_pattern[i]) {
5390                                 return -ENODEV;
5391                         }
5392                 }
5393         }
5394         return 0;
5395 }
5396
5397 static int
5398 bnx2_test_memory(struct bnx2 *bp)
5399 {
5400         int ret = 0;
5401         int i;
5402         static struct mem_entry {
5403                 u32   offset;
5404                 u32   len;
5405         } mem_tbl_5706[] = {
5406                 { 0x60000,  0x4000 },
5407                 { 0xa0000,  0x3000 },
5408                 { 0xe0000,  0x4000 },
5409                 { 0x120000, 0x4000 },
5410                 { 0x1a0000, 0x4000 },
5411                 { 0x160000, 0x4000 },
5412                 { 0xffffffff, 0    },
5413         },
5414         mem_tbl_5709[] = {
5415                 { 0x60000,  0x4000 },
5416                 { 0xa0000,  0x3000 },
5417                 { 0xe0000,  0x4000 },
5418                 { 0x120000, 0x4000 },
5419                 { 0x1a0000, 0x4000 },
5420                 { 0xffffffff, 0    },
5421         };
5422         struct mem_entry *mem_tbl;
5423
5424         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5425                 mem_tbl = mem_tbl_5709;
5426         else
5427                 mem_tbl = mem_tbl_5706;
5428
5429         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5430                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5431                         mem_tbl[i].len)) != 0) {
5432                         return ret;
5433                 }
5434         }
5435
5436         return ret;
5437 }
5438
5439 #define BNX2_MAC_LOOPBACK       0
5440 #define BNX2_PHY_LOOPBACK       1
5441
5442 static int
5443 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5444 {
5445         unsigned int pkt_size, num_pkts, i;
5446         struct sk_buff *skb, *rx_skb;
5447         unsigned char *packet;
5448         u16 rx_start_idx, rx_idx;
5449         dma_addr_t map;
5450         struct tx_bd *txbd;
5451         struct sw_bd *rx_buf;
5452         struct l2_fhdr *rx_hdr;
5453         int ret = -ENODEV;
5454         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5455         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5456         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5457
5458         tx_napi = bnapi;
5459
5460         txr = &tx_napi->tx_ring;
5461         rxr = &bnapi->rx_ring;
5462         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5463                 bp->loopback = MAC_LOOPBACK;
5464                 bnx2_set_mac_loopback(bp);
5465         }
5466         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5467                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5468                         return 0;
5469
5470                 bp->loopback = PHY_LOOPBACK;
5471                 bnx2_set_phy_loopback(bp);
5472         }
5473         else
5474                 return -EINVAL;
5475
5476         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5477         skb = netdev_alloc_skb(bp->dev, pkt_size);
5478         if (!skb)
5479                 return -ENOMEM;
5480         packet = skb_put(skb, pkt_size);
5481         memcpy(packet, bp->dev->dev_addr, 6);
5482         memset(packet + 6, 0x0, 8);
5483         for (i = 14; i < pkt_size; i++)
5484                 packet[i] = (unsigned char) (i & 0xff);
5485
5486         if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5487                 dev_kfree_skb(skb);
5488                 return -EIO;
5489         }
5490         map = skb_shinfo(skb)->dma_head;
5491
5492         REG_WR(bp, BNX2_HC_COMMAND,
5493                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5494
5495         REG_RD(bp, BNX2_HC_COMMAND);
5496
5497         udelay(5);
5498         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5499
5500         num_pkts = 0;
5501
5502         txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5503
5504         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5505         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5506         txbd->tx_bd_mss_nbytes = pkt_size;
5507         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5508
5509         num_pkts++;
5510         txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5511         txr->tx_prod_bseq += pkt_size;
5512
5513         REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5514         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5515
5516         udelay(100);
5517
5518         REG_WR(bp, BNX2_HC_COMMAND,
5519                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5520
5521         REG_RD(bp, BNX2_HC_COMMAND);
5522
5523         udelay(5);
5524
5525         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5526         dev_kfree_skb(skb);
5527
5528         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5529                 goto loopback_test_done;
5530
5531         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5532         if (rx_idx != rx_start_idx + num_pkts) {
5533                 goto loopback_test_done;
5534         }
5535
5536         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5537         rx_skb = rx_buf->skb;
5538
5539         rx_hdr = (struct l2_fhdr *) rx_skb->data;
5540         skb_reserve(rx_skb, BNX2_RX_OFFSET);
5541
5542         pci_dma_sync_single_for_cpu(bp->pdev,
5543                 pci_unmap_addr(rx_buf, mapping),
5544                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5545
5546         if (rx_hdr->l2_fhdr_status &
5547                 (L2_FHDR_ERRORS_BAD_CRC |
5548                 L2_FHDR_ERRORS_PHY_DECODE |
5549                 L2_FHDR_ERRORS_ALIGNMENT |
5550                 L2_FHDR_ERRORS_TOO_SHORT |
5551                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5552
5553                 goto loopback_test_done;
5554         }
5555
5556         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5557                 goto loopback_test_done;
5558         }
5559
5560         for (i = 14; i < pkt_size; i++) {
5561                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5562                         goto loopback_test_done;
5563                 }
5564         }
5565
5566         ret = 0;
5567
5568 loopback_test_done:
5569         bp->loopback = 0;
5570         return ret;
5571 }
5572
5573 #define BNX2_MAC_LOOPBACK_FAILED        1
5574 #define BNX2_PHY_LOOPBACK_FAILED        2
5575 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5576                                          BNX2_PHY_LOOPBACK_FAILED)
5577
5578 static int
5579 bnx2_test_loopback(struct bnx2 *bp)
5580 {
5581         int rc = 0;
5582
5583         if (!netif_running(bp->dev))
5584                 return BNX2_LOOPBACK_FAILED;
5585
5586         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5587         spin_lock_bh(&bp->phy_lock);
5588         bnx2_init_phy(bp, 1);
5589         spin_unlock_bh(&bp->phy_lock);
5590         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5591                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5592         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5593                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5594         return rc;
5595 }
5596
5597 #define NVRAM_SIZE 0x200
5598 #define CRC32_RESIDUAL 0xdebb20e3
5599
5600 static int
5601 bnx2_test_nvram(struct bnx2 *bp)
5602 {
5603         __be32 buf[NVRAM_SIZE / 4];
5604         u8 *data = (u8 *) buf;
5605         int rc = 0;
5606         u32 magic, csum;
5607
5608         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5609                 goto test_nvram_done;
5610
5611         magic = be32_to_cpu(buf[0]);
5612         if (magic != 0x669955aa) {
5613                 rc = -ENODEV;
5614                 goto test_nvram_done;
5615         }
5616
5617         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5618                 goto test_nvram_done;
5619
5620         csum = ether_crc_le(0x100, data);
5621         if (csum != CRC32_RESIDUAL) {
5622                 rc = -ENODEV;
5623                 goto test_nvram_done;
5624         }
5625
5626         csum = ether_crc_le(0x100, data + 0x100);
5627         if (csum != CRC32_RESIDUAL) {
5628                 rc = -ENODEV;
5629         }
5630
5631 test_nvram_done:
5632         return rc;
5633 }
5634
/* Ethtool link self-test: report 0 when link is up, -ENODEV otherwise.
 * For a firmware-managed (remote) PHY the cached link state is used;
 * otherwise the PHY's status register is read directly.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Read BMSR twice: the MII link-status bit is latched-low, so
	 * the second read reflects the current link state.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5660
5661 static int
5662 bnx2_test_intr(struct bnx2 *bp)
5663 {
5664         int i;
5665         u16 status_idx;
5666
5667         if (!netif_running(bp->dev))
5668                 return -ENODEV;
5669
5670         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5671
5672         /* This register is not touched during run-time. */
5673         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5674         REG_RD(bp, BNX2_HC_COMMAND);
5675
5676         for (i = 0; i < 10; i++) {
5677                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5678                         status_idx) {
5679
5680                         break;
5681                 }
5682
5683                 msleep_interruptible(10);
5684         }
5685         if (i < 10)
5686                 return 0;
5687
5688         return -ENODEV;
5689 }
5690
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Returns 1 when the 5706 SerDes sees a valid signal suitable
	 * for parallel detection, 0 otherwise.  All checks must pass.
	 */

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Signal detect must be asserted in the shadow MODE_CTL reg. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read AN_DBG twice so latched error bits reflect the current
	 * state; no sync loss or invalid RUDI allowed.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Likewise read the expansion register twice. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5722
/* Periodic link maintenance for the 5706 SerDes PHY.  Handles the
 * parallel-detection dance: when autoneg gets no link, force 1G full
 * duplex if the remote side shows a usable signal; when a forced link
 * later sees an autoneg-capable partner, re-enable autoneg.  Also
 * monitors for loss of sync and forces a link-down/recovery cycle.
 * Runs from the driver timer; takes phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg grace period still running; skip link checks. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		/* No link under autoneg: if the peer looks alive, force
		 * 1000/full (parallel detection).
		 */
		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up in parallel-detect mode: if the partner now
		 * advertises autoneg, switch back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double-read AN_DBG to clear latched bits, then act on
		 * sync state: force the link down once on sync loss, and
		 * re-evaluate the link when sync (re)appears.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5784
/* Periodic link maintenance for the 5708 SerDes PHY.  On 2.5G-capable
 * ports with no link under autoneg, alternate between forcing 2.5G
 * and re-enabling autoneg to find a compatible partner.  Runs from
 * the driver timer; takes phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Nothing to do when firmware manages the PHY. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still inside the autoneg grace period. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		/* Toggle between forced 2.5G and autoneg until one of
		 * them brings the link up.
		 */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
5817
static void
bnx2_timer(unsigned long data)
{
	/* Periodic housekeeping timer; rearms itself every
	 * bp->current_interval jiffies while the device is up.
	 */
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupt handling is currently held off (intr_sem nonzero);
	 * skip the work and just rearm.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain MSI (not one-shot) can miss events; poll for them. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Mirror the firmware rx-drop count into the stats block. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5853
5854 static int
5855 bnx2_request_irq(struct bnx2 *bp)
5856 {
5857         unsigned long flags;
5858         struct bnx2_irq *irq;
5859         int rc = 0, i;
5860
5861         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5862                 flags = 0;
5863         else
5864                 flags = IRQF_SHARED;
5865
5866         for (i = 0; i < bp->irq_nvecs; i++) {
5867                 irq = &bp->irq_tbl[i];
5868                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5869                                  &bp->bnx2_napi[i]);
5870                 if (rc)
5871                         break;
5872                 irq->requested = 1;
5873         }
5874         return rc;
5875 }
5876
5877 static void
5878 bnx2_free_irq(struct bnx2 *bp)
5879 {
5880         struct bnx2_irq *irq;
5881         int i;
5882
5883         for (i = 0; i < bp->irq_nvecs; i++) {
5884                 irq = &bp->irq_tbl[i];
5885                 if (irq->requested)
5886                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5887                 irq->requested = 0;
5888         }
5889         if (bp->flags & BNX2_FLAG_USING_MSI)
5890                 pci_disable_msi(bp->pdev);
5891         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5892                 pci_disable_msix(bp->pdev);
5893
5894         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5895 }
5896
5897 static void
5898 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5899 {
5900         int i, rc;
5901         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5902         struct net_device *dev = bp->dev;
5903         const int len = sizeof(bp->irq_tbl[0].name);
5904
5905         bnx2_setup_msix_tbl(bp);
5906         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5907         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5908         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5909
5910         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5911                 msix_ent[i].entry = i;
5912                 msix_ent[i].vector = 0;
5913         }
5914
5915         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5916         if (rc != 0)
5917                 return;
5918
5919         bp->irq_nvecs = msix_vecs;
5920         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5921         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5922                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5923                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
5924                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5925         }
5926 }
5927
5928 static void
5929 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5930 {
5931         int cpus = num_online_cpus();
5932         int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5933
5934         bp->irq_tbl[0].handler = bnx2_interrupt;
5935         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5936         bp->irq_nvecs = 1;
5937         bp->irq_tbl[0].vector = bp->pdev->irq;
5938
5939         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5940                 bnx2_enable_msix(bp, msix_vecs);
5941
5942         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5943             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5944                 if (pci_enable_msi(bp->pdev) == 0) {
5945                         bp->flags |= BNX2_FLAG_USING_MSI;
5946                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5947                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5948                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5949                         } else
5950                                 bp->irq_tbl[0].handler = bnx2_msi;
5951
5952                         bp->irq_tbl[0].vector = bp->pdev->irq;
5953                 }
5954         }
5955
5956         bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5957         bp->dev->real_num_tx_queues = bp->num_tx_rings;
5958
5959         bp->num_rx_rings = bp->irq_nvecs;
5960 }
5961
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	/* Bring the device up: power, interrupts, memory, chip init,
	 * then verify MSI actually delivers before committing to it.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces the legacy INTx vector setup. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				/* Timer was already started above; stop it
				 * before unwinding.
				 */
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind; bnx2_free_irq() copes with partially requested vectors. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6038
static void
bnx2_reset_task(struct work_struct *work)
{
	/* Deferred chip reset (scheduled from bnx2_tx_timeout()). */
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	/* bnx2_timer() skips its work while intr_sem is nonzero. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
6054
6055 static void
6056 bnx2_tx_timeout(struct net_device *dev)
6057 {
6058         struct bnx2 *bp = netdev_priv(dev);
6059
6060         /* This allows the netif to be shutdown gracefully before resetting */
6061         schedule_work(&bp->reset_task);
6062 }
6063
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	/* Install the VLAN group and refresh the rx filters. */
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device while swapping in the new group. */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		/* Tell firmware about the updated VLAN-keep state. */
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
6081
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;
	struct skb_shared_info *sp;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* The queue should have been stopped before the ring got this
	 * full; reaching this path indicates a flow-control bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		/* VLAN tag sits in the upper 16 bits of the flags word. */
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 LSO: the TCP header offset (relative to a
			 * standard 40-byte IPv6 header) is split across
			 * bitfields in the flags and mss words.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				/* Extra IP/TCP header length in 32-bit words. */
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		/* DMA mapping failed; drop the packet. */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sp = skb_shinfo(skb);
	mapping = sp->dma_head;

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;

	/* First BD covers the linear part of the skb. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = sp->dma_maps[i];

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD written (linear part if there were no frags). */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Kick the chip: new producer index and cumulative byte count. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		/* Re-check after stopping: bnx2_tx_int() may have freed
		 * descriptors in the meantime.
		 */
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
6222
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	/* Tear the device down in the reverse order of bnx2_open(). */
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no deferred reset is still in flight. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Drop to low-power state until the device is opened again. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6243
/* Read a 64-bit hardware counter composed of _hi/_lo halves.  The
 * whole expansion is parenthesized so the macros remain correct when
 * embedded in larger expressions (the previous form left the `+`
 * exposed to surrounding operator precedence).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low half fits in unsigned long. */
#define GET_NET_STATS32(ctr)		\
	((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6256
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	/* Fill dev->stats from the chip's DMA'd statistics block.
	 * GET_NET_STATS reads the full 64-bit counter on 64-bit hosts
	 * and the low 32 bits on 32-bit hosts.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &dev->stats;

	/* No statistics block available; return the stale counters. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* NOTE(review): 5706 and 5708 A0 report no carrier-sense errors
	 * here -- presumably a chip erratum; confirm against errata docs.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6332
6333 /* All ethtool functions called with rtnl_lock */
6334
6335 static int
6336 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6337 {
6338         struct bnx2 *bp = netdev_priv(dev);
6339         int support_serdes = 0, support_copper = 0;
6340
6341         cmd->supported = SUPPORTED_Autoneg;
6342         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6343                 support_serdes = 1;
6344                 support_copper = 1;
6345         } else if (bp->phy_port == PORT_FIBRE)
6346                 support_serdes = 1;
6347         else
6348                 support_copper = 1;
6349
6350         if (support_serdes) {
6351                 cmd->supported |= SUPPORTED_1000baseT_Full |
6352                         SUPPORTED_FIBRE;
6353                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6354                         cmd->supported |= SUPPORTED_2500baseX_Full;
6355
6356         }
6357         if (support_copper) {
6358                 cmd->supported |= SUPPORTED_10baseT_Half |
6359                         SUPPORTED_10baseT_Full |
6360                         SUPPORTED_100baseT_Half |
6361                         SUPPORTED_100baseT_Full |
6362                         SUPPORTED_1000baseT_Full |
6363                         SUPPORTED_TP;
6364
6365         }
6366
6367         spin_lock_bh(&bp->phy_lock);
6368         cmd->port = bp->phy_port;
6369         cmd->advertising = bp->advertising;
6370
6371         if (bp->autoneg & AUTONEG_SPEED) {
6372                 cmd->autoneg = AUTONEG_ENABLE;
6373         }
6374         else {
6375                 cmd->autoneg = AUTONEG_DISABLE;
6376         }
6377
6378         if (netif_carrier_ok(dev)) {
6379                 cmd->speed = bp->line_speed;
6380                 cmd->duplex = bp->duplex;
6381         }
6382         else {
6383                 cmd->speed = -1;
6384                 cmd->duplex = -1;
6385         }
6386         spin_unlock_bh(&bp->phy_lock);
6387
6388         cmd->transceiver = XCVR_INTERNAL;
6389         cmd->phy_address = bp->phy_addr;
6390
6391         return 0;
6392 }
6393
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; bp is updated only after validation. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing ports requires a remote (firmware-managed) PHY. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 advertisement is invalid on fibre. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise all the port's speeds. */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only full-duplex 1G/2.5G. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed; commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6488
6489 static void
6490 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6491 {
6492         struct bnx2 *bp = netdev_priv(dev);
6493
6494         strcpy(info->driver, DRV_MODULE_NAME);
6495         strcpy(info->version, DRV_MODULE_VERSION);
6496         strcpy(info->bus_info, pci_name(bp->pdev));
6497         strcpy(info->fw_version, bp->fw_version);
6498 }
6499
6500 #define BNX2_REGDUMP_LEN                (32 * 1024)
6501
/* ethtool: size of the buffer bnx2_get_regs() will fill. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6507
6508 static void
6509 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6510 {
6511         u32 *p = _p, i, offset;
6512         u8 *orig_p = _p;
6513         struct bnx2 *bp = netdev_priv(dev);
6514         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6515                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6516                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6517                                  0x1040, 0x1048, 0x1080, 0x10a4,
6518                                  0x1400, 0x1490, 0x1498, 0x14f0,
6519                                  0x1500, 0x155c, 0x1580, 0x15dc,
6520                                  0x1600, 0x1658, 0x1680, 0x16d8,
6521                                  0x1800, 0x1820, 0x1840, 0x1854,
6522                                  0x1880, 0x1894, 0x1900, 0x1984,
6523                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6524                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6525                                  0x2000, 0x2030, 0x23c0, 0x2400,
6526                                  0x2800, 0x2820, 0x2830, 0x2850,
6527                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6528                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6529                                  0x4080, 0x4090, 0x43c0, 0x4458,
6530                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6531                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6532                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6533                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6534                                  0x6800, 0x6848, 0x684c, 0x6860,
6535                                  0x6888, 0x6910, 0x8000 };
6536
6537         regs->version = 0;
6538
6539         memset(p, 0, BNX2_REGDUMP_LEN);
6540
6541         if (!netif_running(bp->dev))
6542                 return;
6543
6544         i = 0;
6545         offset = reg_boundaries[0];
6546         p += offset;
6547         while (offset < BNX2_REGDUMP_LEN) {
6548                 *p++ = REG_RD(bp, offset);
6549                 offset += 4;
6550                 if (offset == reg_boundaries[i + 1]) {
6551                         offset = reg_boundaries[i + 2];
6552                         p = (u32 *) (orig_p + offset);
6553                         i += 2;
6554                 }
6555         }
6556 }
6557
6558 static void
6559 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6560 {
6561         struct bnx2 *bp = netdev_priv(dev);
6562
6563         if (bp->flags & BNX2_FLAG_NO_WOL) {
6564                 wol->supported = 0;
6565                 wol->wolopts = 0;
6566         }
6567         else {
6568                 wol->supported = WAKE_MAGIC;
6569                 if (bp->wol)
6570                         wol->wolopts = WAKE_MAGIC;
6571                 else
6572                         wol->wolopts = 0;
6573         }
6574         memset(&wol->sopass, 0, sizeof(wol->sopass));
6575 }
6576
6577 static int
6578 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6579 {
6580         struct bnx2 *bp = netdev_priv(dev);
6581
6582         if (wol->wolopts & ~WAKE_MAGIC)
6583                 return -EINVAL;
6584
6585         if (wol->wolopts & WAKE_MAGIC) {
6586                 if (bp->flags & BNX2_FLAG_NO_WOL)
6587                         return -EINVAL;
6588
6589                 bp->wol = 1;
6590         }
6591         else {
6592                 bp->wol = 0;
6593         }
6594         return 0;
6595 }
6596
/* ethtool: restart autonegotiation on the active PHY. */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting autoneg makes no sense in forced-speed mode. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		/* Firmware-managed PHY: delegate the link restart. */
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; msleep() cannot be
		 * called under a spinlock.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6642
6643 static int
6644 bnx2_get_eeprom_len(struct net_device *dev)
6645 {
6646         struct bnx2 *bp = netdev_priv(dev);
6647
6648         if (bp->flash_info == NULL)
6649                 return 0;
6650
6651         return (int) bp->flash_size;
6652 }
6653
6654 static int
6655 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6656                 u8 *eebuf)
6657 {
6658         struct bnx2 *bp = netdev_priv(dev);
6659         int rc;
6660
6661         if (!netif_running(dev))
6662                 return -EAGAIN;
6663
6664         /* parameters already validated in ethtool_get_eeprom */
6665
6666         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6667
6668         return rc;
6669 }
6670
6671 static int
6672 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6673                 u8 *eebuf)
6674 {
6675         struct bnx2 *bp = netdev_priv(dev);
6676         int rc;
6677
6678         if (!netif_running(dev))
6679                 return -EAGAIN;
6680
6681         /* parameters already validated in ethtool_set_eeprom */
6682
6683         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6684
6685         return rc;
6686 }
6687
6688 static int
6689 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6690 {
6691         struct bnx2 *bp = netdev_priv(dev);
6692
6693         memset(coal, 0, sizeof(struct ethtool_coalesce));
6694
6695         coal->rx_coalesce_usecs = bp->rx_ticks;
6696         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6697         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6698         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6699
6700         coal->tx_coalesce_usecs = bp->tx_ticks;
6701         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6702         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6703         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6704
6705         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6706
6707         return 0;
6708 }
6709
6710 static int
6711 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6712 {
6713         struct bnx2 *bp = netdev_priv(dev);
6714
6715         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6716         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6717
6718         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6719         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6720
6721         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6722         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6723
6724         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6725         if (bp->rx_quick_cons_trip_int > 0xff)
6726                 bp->rx_quick_cons_trip_int = 0xff;
6727
6728         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6729         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6730
6731         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6732         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6733
6734         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6735         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6736
6737         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6738         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6739                 0xff;
6740
6741         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6742         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6743                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6744                         bp->stats_ticks = USEC_PER_SEC;
6745         }
6746         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6747                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6748         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6749
6750         if (netif_running(bp->dev)) {
6751                 bnx2_netif_stop(bp);
6752                 bnx2_init_nic(bp, 0);
6753                 bnx2_netif_start(bp);
6754         }
6755
6756         return 0;
6757 }
6758
6759 static void
6760 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6761 {
6762         struct bnx2 *bp = netdev_priv(dev);
6763
6764         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6765         ering->rx_mini_max_pending = 0;
6766         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6767
6768         ering->rx_pending = bp->rx_ring_size;
6769         ering->rx_mini_pending = 0;
6770         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6771
6772         ering->tx_max_pending = MAX_TX_DESC_CNT;
6773         ering->tx_pending = bp->tx_ring_size;
6774 }
6775
6776 static int
6777 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6778 {
6779         if (netif_running(bp->dev)) {
6780                 bnx2_netif_stop(bp);
6781                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6782                 bnx2_free_skbs(bp);
6783                 bnx2_free_mem(bp);
6784         }
6785
6786         bnx2_set_rx_ring_size(bp, rx);
6787         bp->tx_ring_size = tx;
6788
6789         if (netif_running(bp->dev)) {
6790                 int rc;
6791
6792                 rc = bnx2_alloc_mem(bp);
6793                 if (rc)
6794                         return rc;
6795                 bnx2_init_nic(bp, 0);
6796                 bnx2_netif_start(bp);
6797         }
6798         return 0;
6799 }
6800
6801 static int
6802 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6803 {
6804         struct bnx2 *bp = netdev_priv(dev);
6805         int rc;
6806
6807         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6808                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6809                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6810
6811                 return -EINVAL;
6812         }
6813         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6814         return rc;
6815 }
6816
6817 static void
6818 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6819 {
6820         struct bnx2 *bp = netdev_priv(dev);
6821
6822         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6823         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6824         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6825 }
6826
6827 static int
6828 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6829 {
6830         struct bnx2 *bp = netdev_priv(dev);
6831
6832         bp->req_flow_ctrl = 0;
6833         if (epause->rx_pause)
6834                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6835         if (epause->tx_pause)
6836                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6837
6838         if (epause->autoneg) {
6839                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6840         }
6841         else {
6842                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6843         }
6844
6845         if (netif_running(dev)) {
6846                 spin_lock_bh(&bp->phy_lock);
6847                 bnx2_setup_phy(bp, bp->phy_port);
6848                 spin_unlock_bh(&bp->phy_lock);
6849         }
6850
6851         return 0;
6852 }
6853
6854 static u32
6855 bnx2_get_rx_csum(struct net_device *dev)
6856 {
6857         struct bnx2 *bp = netdev_priv(dev);
6858
6859         return bp->rx_csum;
6860 }
6861
6862 static int
6863 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6864 {
6865         struct bnx2 *bp = netdev_priv(dev);
6866
6867         bp->rx_csum = data;
6868         return 0;
6869 }
6870
6871 static int
6872 bnx2_set_tso(struct net_device *dev, u32 data)
6873 {
6874         struct bnx2 *bp = netdev_priv(dev);
6875
6876         if (data) {
6877                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6878                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6879                         dev->features |= NETIF_F_TSO6;
6880         } else
6881                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6882                                    NETIF_F_TSO_ECN);
6883         return 0;
6884 }
6885
#define BNX2_NUM_STATS 46

/* Counter names reported via ethtool -S.  The order must match
 * bnx2_stats_offset_arr[] and the stats length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6938
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Offset, in 32-bit words, of each counter within the hardware
 * statistics block.  64-bit counters reference their _hi word; the
 * _lo word is read from the following slot (see bnx2_get_ethtool_stats).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6989
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8, 4, or 0 = skipped) on 5706 A0-A2
 * and 5708 A0 (see the CHIP_ID checks in bnx2_get_ethtool_stats).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
7000
/* Per-counter width in bytes (8 or 4) on all other chip revisions. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
7008
#define BNX2_NUM_TESTS 6

/* Self-test names for ethtool; indices match the buf[] slots filled
 * in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7021
7022 static int
7023 bnx2_get_sset_count(struct net_device *dev, int sset)
7024 {
7025         switch (sset) {
7026         case ETH_SS_TEST:
7027                 return BNX2_NUM_TESTS;
7028         case ETH_SS_STATS:
7029                 return BNX2_NUM_STATS;
7030         default:
7031                 return -EOPNOTSUPP;
7032         }
7033 }
7034
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Run the ethtool self tests.  buf[] slots correspond to
	 * bnx2_tests_str_arr; a non-zero entry marks a failure.
	 */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive use of the chip. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or shut the chip down if the
		 * interface is not up.
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests: safe to run while the device stays in use. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7093
7094 static void
7095 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7096 {
7097         switch (stringset) {
7098         case ETH_SS_STATS:
7099                 memcpy(buf, bnx2_stats_str_arr,
7100                         sizeof(bnx2_stats_str_arr));
7101                 break;
7102         case ETH_SS_TEST:
7103                 memcpy(buf, bnx2_tests_str_arr,
7104                         sizeof(bnx2_tests_str_arr));
7105                 break;
7106         }
7107 }
7108
7109 static void
7110 bnx2_get_ethtool_stats(struct net_device *dev,
7111                 struct ethtool_stats *stats, u64 *buf)
7112 {
7113         struct bnx2 *bp = netdev_priv(dev);
7114         int i;
7115         u32 *hw_stats = (u32 *) bp->stats_blk;
7116         u8 *stats_len_arr = NULL;
7117
7118         if (hw_stats == NULL) {
7119                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7120                 return;
7121         }
7122
7123         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7124             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7125             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7126             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7127                 stats_len_arr = bnx2_5706_stats_len_arr;
7128         else
7129                 stats_len_arr = bnx2_5708_stats_len_arr;
7130
7131         for (i = 0; i < BNX2_NUM_STATS; i++) {
7132                 if (stats_len_arr[i] == 0) {
7133                         /* skip this counter */
7134                         buf[i] = 0;
7135                         continue;
7136                 }
7137                 if (stats_len_arr[i] == 4) {
7138                         /* 4-byte counter */
7139                         buf[i] = (u64)
7140                                 *(hw_stats + bnx2_stats_offset_arr[i]);
7141                         continue;
7142                 }
7143                 /* 8-byte counter */
7144                 buf[i] = (((u64) *(hw_stats +
7145                                         bnx2_stats_offset_arr[i])) << 32) +
7146                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7147         }
7148 }
7149
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;	/* original BNX2_MISC_CFG, restored before returning */

	/* ethtool -p: blink the port LED for `data` seconds so the
	 * physical port can be located.
	 */
	bnx2_set_power_state(bp, PCI_D0);

	/* Default to 2 seconds if no duration was requested. */
	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Toggle every 500 ms: override-only (LED off) on even
	 * iterations, all override bits plus traffic (LED on) on odd.
	 */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		/* Bail out early if the caller was interrupted. */
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	/* Restore normal LED behavior and the saved MISC_CFG. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	/* Return to low power if the interface is down. */
	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
7189
7190 static int
7191 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7192 {
7193         struct bnx2 *bp = netdev_priv(dev);
7194
7195         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7196                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7197         else
7198                 return (ethtool_op_set_tx_csum(dev, data));
7199 }
7200
/* ethtool entry points for the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7231
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	/* MII register access ioctls.  Direct PHY access is refused when
	 * the PHY is managed by remote firmware, and requires the device
	 * to be up.
	 */
	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes all PHY (MDIO) accesses. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers needs admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7285
7286 /* Called with rtnl_lock */
7287 static int
7288 bnx2_change_mac_addr(struct net_device *dev, void *p)
7289 {
7290         struct sockaddr *addr = p;
7291         struct bnx2 *bp = netdev_priv(dev);
7292
7293         if (!is_valid_ether_addr(addr->sa_data))
7294                 return -EINVAL;
7295
7296         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7297         if (netif_running(dev))
7298                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7299
7300         return 0;
7301 }
7302
7303 /* Called with rtnl_lock */
7304 static int
7305 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7306 {
7307         struct bnx2 *bp = netdev_priv(dev);
7308
7309         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7310                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7311                 return -EINVAL;
7312
7313         dev->mtu = new_mtu;
7314         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7315 }
7316
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: service every interrupt vector with its IRQ
 * disabled so the handler cannot race with a real interrupt.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		disable_irq(bp->irq_tbl[i].vector);
		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
		enable_irq(bp->irq_tbl[i].vector);
	}
}
#endif
7331
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Probe-time media detection for the 5709: set
	 * BNX2_PHY_FLAG_SERDES on SerDes ports, leave flags untouched
	 * otherwise.  Single-media parts are identified by bond id;
	 * dual-media parts are decided by the strap pins.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Read the strap setting, honoring a software override. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values selecting SerDes differ per PCI function. */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7369
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	/* Probe-time detection of the host bus: records PCI vs PCI-X,
	 * bus speed and width in bp->flags / bp->bus_speed_mhz.
	 */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: derive the bus speed from the detected clock. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz is detectable. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7421
7422 static int __devinit
7423 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7424 {
7425         struct bnx2 *bp;
7426         unsigned long mem_len;
7427         int rc, i, j;
7428         u32 reg;
7429         u64 dma_mask, persist_dma_mask;
7430
7431         SET_NETDEV_DEV(dev, &pdev->dev);
7432         bp = netdev_priv(dev);
7433
7434         bp->flags = 0;
7435         bp->phy_flags = 0;
7436
7437         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7438         rc = pci_enable_device(pdev);
7439         if (rc) {
7440                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7441                 goto err_out;
7442         }
7443
7444         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7445                 dev_err(&pdev->dev,
7446                         "Cannot find PCI device base address, aborting.\n");
7447                 rc = -ENODEV;
7448                 goto err_out_disable;
7449         }
7450
7451         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7452         if (rc) {
7453                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7454                 goto err_out_disable;
7455         }
7456
7457         pci_set_master(pdev);
7458         pci_save_state(pdev);
7459
7460         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7461         if (bp->pm_cap == 0) {
7462                 dev_err(&pdev->dev,
7463                         "Cannot find power management capability, aborting.\n");
7464                 rc = -EIO;
7465                 goto err_out_release;
7466         }
7467
7468         bp->dev = dev;
7469         bp->pdev = pdev;
7470
7471         spin_lock_init(&bp->phy_lock);
7472         spin_lock_init(&bp->indirect_lock);
7473         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7474
7475         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7476         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7477         dev->mem_end = dev->mem_start + mem_len;
7478         dev->irq = pdev->irq;
7479
7480         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7481
7482         if (!bp->regview) {
7483                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7484                 rc = -ENOMEM;
7485                 goto err_out_release;
7486         }
7487
7488         /* Configure byte swap and enable write to the reg_window registers.
7489          * Rely on CPU to do target byte swapping on big endian systems
7490          * The chip's target access swapping will not swap all accesses
7491          */
7492         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7493                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7494                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7495
7496         bnx2_set_power_state(bp, PCI_D0);
7497
7498         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7499
7500         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7501                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7502                         dev_err(&pdev->dev,
7503                                 "Cannot find PCIE capability, aborting.\n");
7504                         rc = -EIO;
7505                         goto err_out_unmap;
7506                 }
7507                 bp->flags |= BNX2_FLAG_PCIE;
7508                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7509                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7510         } else {
7511                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7512                 if (bp->pcix_cap == 0) {
7513                         dev_err(&pdev->dev,
7514                                 "Cannot find PCIX capability, aborting.\n");
7515                         rc = -EIO;
7516                         goto err_out_unmap;
7517                 }
7518         }
7519
7520         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7521                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7522                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7523         }
7524
7525         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7526                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7527                         bp->flags |= BNX2_FLAG_MSI_CAP;
7528         }
7529
7530         /* 5708 cannot support DMA addresses > 40-bit.  */
7531         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7532                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7533         else
7534                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7535
7536         /* Configure DMA attributes. */
7537         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7538                 dev->features |= NETIF_F_HIGHDMA;
7539                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7540                 if (rc) {
7541                         dev_err(&pdev->dev,
7542                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7543                         goto err_out_unmap;
7544                 }
7545         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7546                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7547                 goto err_out_unmap;
7548         }
7549
7550         if (!(bp->flags & BNX2_FLAG_PCIE))
7551                 bnx2_get_pci_speed(bp);
7552
7553         /* 5706A0 may falsely detect SERR and PERR. */
7554         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7555                 reg = REG_RD(bp, PCI_COMMAND);
7556                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7557                 REG_WR(bp, PCI_COMMAND, reg);
7558         }
7559         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7560                 !(bp->flags & BNX2_FLAG_PCIX)) {
7561
7562                 dev_err(&pdev->dev,
7563                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7564                 goto err_out_unmap;
7565         }
7566
7567         bnx2_init_nvram(bp);
7568
7569         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7570
7571         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7572             BNX2_SHM_HDR_SIGNATURE_SIG) {
7573                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7574
7575                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7576         } else
7577                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7578
7579         /* Get the permanent MAC address.  First we need to make sure the
7580          * firmware is actually running.
7581          */
7582         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7583
7584         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7585             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7586                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7587                 rc = -ENODEV;
7588                 goto err_out_unmap;
7589         }
7590
7591         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7592         for (i = 0, j = 0; i < 3; i++) {
7593                 u8 num, k, skip0;
7594
7595                 num = (u8) (reg >> (24 - (i * 8)));
7596                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7597                         if (num >= k || !skip0 || k == 1) {
7598                                 bp->fw_version[j++] = (num / k) + '0';
7599                                 skip0 = 0;
7600                         }
7601                 }
7602                 if (i != 2)
7603                         bp->fw_version[j++] = '.';
7604         }
7605         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7606         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7607                 bp->wol = 1;
7608
7609         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7610                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7611
7612                 for (i = 0; i < 30; i++) {
7613                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7614                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7615                                 break;
7616                         msleep(10);
7617                 }
7618         }
7619         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7620         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7621         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7622             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7623                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7624
7625                 bp->fw_version[j++] = ' ';
7626                 for (i = 0; i < 3; i++) {
7627                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7628                         reg = swab32(reg);
7629                         memcpy(&bp->fw_version[j], &reg, 4);
7630                         j += 4;
7631                 }
7632         }
7633
7634         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7635         bp->mac_addr[0] = (u8) (reg >> 8);
7636         bp->mac_addr[1] = (u8) reg;
7637
7638         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7639         bp->mac_addr[2] = (u8) (reg >> 24);
7640         bp->mac_addr[3] = (u8) (reg >> 16);
7641         bp->mac_addr[4] = (u8) (reg >> 8);
7642         bp->mac_addr[5] = (u8) reg;
7643
7644         bp->tx_ring_size = MAX_TX_DESC_CNT;
7645         bnx2_set_rx_ring_size(bp, 255);
7646
7647         bp->rx_csum = 1;
7648
7649         bp->tx_quick_cons_trip_int = 20;
7650         bp->tx_quick_cons_trip = 20;
7651         bp->tx_ticks_int = 80;
7652         bp->tx_ticks = 80;
7653
7654         bp->rx_quick_cons_trip_int = 6;
7655         bp->rx_quick_cons_trip = 6;
7656         bp->rx_ticks_int = 18;
7657         bp->rx_ticks = 18;
7658
7659         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7660
7661         bp->current_interval = BNX2_TIMER_INTERVAL;
7662
7663         bp->phy_addr = 1;
7664
7665         /* Disable WOL support if we are running on a SERDES chip. */
7666         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7667                 bnx2_get_5709_media(bp);
7668         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7669                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7670
7671         bp->phy_port = PORT_TP;
7672         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7673                 bp->phy_port = PORT_FIBRE;
7674                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7675                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7676                         bp->flags |= BNX2_FLAG_NO_WOL;
7677                         bp->wol = 0;
7678                 }
7679                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7680                         /* Don't do parallel detect on this board because of
7681                          * some board problems.  The link will not go down
7682                          * if we do parallel detect.
7683                          */
7684                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7685                             pdev->subsystem_device == 0x310c)
7686                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7687                 } else {
7688                         bp->phy_addr = 2;
7689                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7690                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7691                 }
7692         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7693                    CHIP_NUM(bp) == CHIP_NUM_5708)
7694                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7695         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7696                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7697                   CHIP_REV(bp) == CHIP_REV_Bx))
7698                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7699
7700         bnx2_init_fw_cap(bp);
7701
7702         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7703             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7704             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7705             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7706                 bp->flags |= BNX2_FLAG_NO_WOL;
7707                 bp->wol = 0;
7708         }
7709
7710         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7711                 bp->tx_quick_cons_trip_int =
7712                         bp->tx_quick_cons_trip;
7713                 bp->tx_ticks_int = bp->tx_ticks;
7714                 bp->rx_quick_cons_trip_int =
7715                         bp->rx_quick_cons_trip;
7716                 bp->rx_ticks_int = bp->rx_ticks;
7717                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7718                 bp->com_ticks_int = bp->com_ticks;
7719                 bp->cmd_ticks_int = bp->cmd_ticks;
7720         }
7721
7722         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7723          *
7724          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7725          * with byte enables disabled on the unused 32-bit word.  This is legal
7726          * but causes problems on the AMD 8132 which will eventually stop
7727          * responding after a while.
7728          *
7729          * AMD believes this incompatibility is unique to the 5706, and
7730          * prefers to locally disable MSI rather than globally disabling it.
7731          */
7732         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7733                 struct pci_dev *amd_8132 = NULL;
7734
7735                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7736                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7737                                                   amd_8132))) {
7738
7739                         if (amd_8132->revision >= 0x10 &&
7740                             amd_8132->revision <= 0x13) {
7741                                 disable_msi = 1;
7742                                 pci_dev_put(amd_8132);
7743                                 break;
7744                         }
7745                 }
7746         }
7747
7748         bnx2_set_default_link(bp);
7749         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7750
7751         init_timer(&bp->timer);
7752         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7753         bp->timer.data = (unsigned long) bp;
7754         bp->timer.function = bnx2_timer;
7755
7756         return 0;
7757
7758 err_out_unmap:
7759         if (bp->regview) {
7760                 iounmap(bp->regview);
7761                 bp->regview = NULL;
7762         }
7763
7764 err_out_release:
7765         pci_release_regions(pdev);
7766
7767 err_out_disable:
7768         pci_disable_device(pdev);
7769         pci_set_drvdata(pdev, NULL);
7770
7771 err_out:
7772         return rc;
7773 }
7774
7775 static char * __devinit
7776 bnx2_bus_string(struct bnx2 *bp, char *str)
7777 {
7778         char *s = str;
7779
7780         if (bp->flags & BNX2_FLAG_PCIE) {
7781                 s += sprintf(s, "PCI Express");
7782         } else {
7783                 s += sprintf(s, "PCI");
7784                 if (bp->flags & BNX2_FLAG_PCIX)
7785                         s += sprintf(s, "-X");
7786                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7787                         s += sprintf(s, " 32-bit");
7788                 else
7789                         s += sprintf(s, " 64-bit");
7790                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7791         }
7792         return str;
7793 }
7794
7795 static void __devinit
7796 bnx2_init_napi(struct bnx2 *bp)
7797 {
7798         int i;
7799
7800         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7801                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7802                 int (*poll)(struct napi_struct *, int);
7803
7804                 if (i == 0)
7805                         poll = bnx2_poll;
7806                 else
7807                         poll = bnx2_poll_msix;
7808
7809                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7810                 bnapi->bp = bp;
7811         }
7812 }
7813
/* Net device callbacks wired into the stack at probe time
 * (dev->netdev_ops in bnx2_init_one).
 */
static const struct net_device_ops bnx2_netdev_ops = {
        .ndo_open               = bnx2_open,
        .ndo_start_xmit         = bnx2_start_xmit,
        .ndo_stop               = bnx2_close,
        .ndo_get_stats          = bnx2_get_stats,
        .ndo_set_rx_mode        = bnx2_set_rx_mode,
        .ndo_do_ioctl           = bnx2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnx2_change_mac_addr,
        .ndo_change_mtu         = bnx2_change_mtu,
        .ndo_tx_timeout         = bnx2_tx_timeout,
#ifdef BCM_VLAN
        .ndo_vlan_rx_register   = bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
        .ndo_poll_controller    = poll_bnx2,
#endif
};
7832
/* PCI probe entry point: allocate the netdev, initialize the board,
 * request firmware, advertise offload features and register the device.
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources (firmware, mapping, PCI regions, netdev) are released.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int version_printed = 0;
        struct net_device *dev = NULL;
        struct bnx2 *bp;
        int rc;
        char str[40];

        /* Print the driver version banner only for the first probed device. */
        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

        if (!dev)
                return -ENOMEM;

        /* Chip identification, BAR mapping, shmem/firmware handshake, etc. */
        rc = bnx2_init_board(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        dev->netdev_ops = &bnx2_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->ethtool_ops = &bnx2_ethtool_ops;

        bp = netdev_priv(dev);
        bnx2_init_napi(bp);

        pci_set_drvdata(pdev, dev);

        /* Load MIPS/RV2P firmware images via request_firmware(). */
        rc = bnx2_request_firmware(bp);
        if (rc)
                goto error;

        /* Permanent MAC was read from shmem in bnx2_init_board(). */
        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);

        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
        /* Only the 5709 supports IPv6 checksum and TSO6 offload. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
        dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                dev->features |= NETIF_F_TSO6;

        if ((rc = register_netdev(dev))) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto error;
        }

        printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
                "IRQ %d, node addr %pM\n",
                dev->name,
                board_info[ent->driver_data].name,
                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                ((CHIP_ID(bp) & 0x0ff0) >> 4),
                bnx2_bus_string(bp, str),
                dev->base_addr,
                bp->pdev->irq, dev->dev_addr);

        return 0;

error:
        /* Unwind everything acquired above and by bnx2_init_board(). */
        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
                release_firmware(bp->rv2p_firmware);

        if (bp->regview)
                iounmap(bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
        return rc;
}
7915
/* PCI remove entry point: tear down in reverse order of probe —
 * flush deferred work, unregister the netdev, then release firmware,
 * the register mapping, the netdev itself and the PCI resources.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure reset_task is not running while we tear down. */
        flush_scheduled_work();

        unregister_netdev(dev);

        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
                release_firmware(bp->rv2p_firmware);

        if (bp->regview)
                iounmap(bp->regview);

        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
7939
/* PM suspend hook: save PCI state, then (if the interface is up)
 * quiesce traffic, stop the timer, reset the chip and enter the
 * power state chosen by the PCI core.  Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);
        if (!netif_running(dev))
                return 0;

        /* Stop deferred work and all traffic before touching the chip. */
        flush_scheduled_work();
        bnx2_netif_stop(bp);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_skbs(bp);
        bnx2_set_power_state(bp, pci_choose_state(pdev, state));
        return 0;
}
7963
/* PM resume hook: restore PCI state and, if the interface was up,
 * power the chip back to D0, reinitialize the NIC and restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        pci_restore_state(pdev);
        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        /* NOTE(review): bnx2_init_nic()'s return value is ignored here —
         * a failed reinit would leave the device attached but dead.
         */
        bnx2_init_nic(bp, 1);
        bnx2_netif_start(bp);
        return 0;
}
7980
7981 /**
7982  * bnx2_io_error_detected - called when PCI error is detected
7983  * @pdev: Pointer to PCI device
7984  * @state: The current pci connection state
7985  *
7986  * This function is called after a PCI bus error affecting
7987  * this device has been detected.
7988  */
7989 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7990                                                pci_channel_state_t state)
7991 {
7992         struct net_device *dev = pci_get_drvdata(pdev);
7993         struct bnx2 *bp = netdev_priv(dev);
7994
7995         rtnl_lock();
7996         netif_device_detach(dev);
7997
7998         if (netif_running(dev)) {
7999                 bnx2_netif_stop(bp);
8000                 del_timer_sync(&bp->timer);
8001                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8002         }
8003
8004         pci_disable_device(pdev);
8005         rtnl_unlock();
8006
8007         /* Request a slot slot reset. */
8008         return PCI_ERS_RESULT_NEED_RESET;
8009 }
8010
8011 /**
8012  * bnx2_io_slot_reset - called after the pci bus has been reset.
8013  * @pdev: Pointer to PCI device
8014  *
8015  * Restart the card from scratch, as if from a cold-boot.
8016  */
8017 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8018 {
8019         struct net_device *dev = pci_get_drvdata(pdev);
8020         struct bnx2 *bp = netdev_priv(dev);
8021
8022         rtnl_lock();
8023         if (pci_enable_device(pdev)) {
8024                 dev_err(&pdev->dev,
8025                         "Cannot re-enable PCI device after reset.\n");
8026                 rtnl_unlock();
8027                 return PCI_ERS_RESULT_DISCONNECT;
8028         }
8029         pci_set_master(pdev);
8030         pci_restore_state(pdev);
8031
8032         if (netif_running(dev)) {
8033                 bnx2_set_power_state(bp, PCI_D0);
8034                 bnx2_init_nic(bp, 1);
8035         }
8036
8037         rtnl_unlock();
8038         return PCI_ERS_RESULT_RECOVERED;
8039 }
8040
8041 /**
8042  * bnx2_io_resume - called when traffic can start flowing again.
8043  * @pdev: Pointer to PCI device
8044  *
8045  * This callback is called when the error recovery driver tells us that
8046  * its OK to resume normal operation.
8047  */
8048 static void bnx2_io_resume(struct pci_dev *pdev)
8049 {
8050         struct net_device *dev = pci_get_drvdata(pdev);
8051         struct bnx2 *bp = netdev_priv(dev);
8052
8053         rtnl_lock();
8054         if (netif_running(dev))
8055                 bnx2_netif_start(bp);
8056
8057         netif_device_attach(dev);
8058         rtnl_unlock();
8059 }
8060
/* PCI AER (Advanced Error Reporting) recovery callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};
8066
/* PCI driver descriptor registered at module init. */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
        .err_handler    = &bnx2_err_handler,
};
8076
8077 static int __init bnx2_init(void)
8078 {
8079         return pci_register_driver(&bnx2_pci_driver);
8080 }
8081
/* Module exit point: unregister the driver, detaching all devices. */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
8086
/* Hook the init/exit routines into the module load/unload path. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8089
8090
8091