Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/holtmann/bluet...
[linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #include "bnx2.h"
54 #include "bnx2_fw.h"
55
56 #define DRV_MODULE_NAME         "bnx2"
57 #define PFX DRV_MODULE_NAME     ": "
58 #define DRV_MODULE_VERSION      "2.0.1"
59 #define DRV_MODULE_RELDATE      "May 6, 2009"
60 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-4.6.16.fw"
61 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-4.6.16.fw"
62 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-4.6.17.fw"
63 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-4.6.15.fw"
64
65 #define RUN_AT(x) (jiffies + (x))
66
67 /* Time in jiffies before concluding the transmitter is hung. */
68 #define TX_TIMEOUT  (5*HZ)
69
70 static char version[] __devinitdata =
71         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
72
73 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
74 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
75 MODULE_LICENSE("GPL");
76 MODULE_VERSION(DRV_MODULE_VERSION);
77 MODULE_FIRMWARE(FW_MIPS_FILE_06);
78 MODULE_FIRMWARE(FW_RV2P_FILE_06);
79 MODULE_FIRMWARE(FW_MIPS_FILE_09);
80 MODULE_FIRMWARE(FW_RV2P_FILE_09);
81
82 static int disable_msi = 0;
83
84 module_param(disable_msi, int, 0);
85 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
86
/* Board types supported by this driver.  Used as the driver_data value
 * in bnx2_pci_tbl below, and as the index into board_info[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
100
/* indexed by board_t, above */
static struct {
	char *name;	/* human-readable adapter name reported at probe time */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
117
/* PCI device ID table.  The HP OEM entries (matched by subsystem vendor
 * and device ID) must appear before the PCI_ANY_ID wildcard entries for
 * the same chip so the more specific match wins.  The last field is the
 * board_t driver_data used to index board_info[].
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_NX2_* constant; raw IDs used. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
143
/* Table of NVRAM (EEPROM/flash) parts the chip may be strapped with.
 * NOTE(review): the five leading hex words of each entry map to u32
 * fields of struct flash_spec declared in bnx2.h (strap match value and
 * register/config words) -- verify the field order there before editing
 * any of the constants.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
232
/* NVRAM parameters for 5709-family chips, used instead of the
 * strap-matched flash_table above.  NOTE(review): the selection logic
 * lives in the NVRAM init code elsewhere in this file -- confirm there.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
241
242 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
243
/* Return the number of free TX descriptors in @txr.
 *
 * tx_prod and tx_cons are free-running indices; their 16-bit difference
 * is the number of in-flight descriptors.  The ring has 256 slots but
 * only 255 usable entries, so a difference of exactly TX_DESC_CNT means
 * the skipped slot is included and must be discounted.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Ensure fresh tx_prod/tx_cons values are observed; pairs with
	 * the barriers on the producer/consumer sides.
	 */
	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;	/* indices are 16-bit quantities */
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;	/* discount the skipped slot */
	}
	return (bp->tx_ring_size - diff);
}
261
262 static u32
263 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
264 {
265         u32 val;
266
267         spin_lock_bh(&bp->indirect_lock);
268         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
269         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
270         spin_unlock_bh(&bp->indirect_lock);
271         return val;
272 }
273
/* Write @val to device register @offset indirectly through the PCI
 * config window.  The address write must precede the data write;
 * indirect_lock serializes use of the shared register pair.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
282
/* Write @val into firmware shared memory at @offset (relative to
 * shmem_base) using indirect register access.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
288
289 static u32
290 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
291 {
292         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
293 }
294
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * 5709 uses the CTX_CTX_DATA/CTX_CTX_CTRL handshake and polls for the
 * write to complete; older chips use the CTX_DATA_ADR/CTX_DATA window.
 * indirect_lock serializes the shared register pairs.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll for WRITE_REQ to self-clear.  Gives up silently
		 * after 5 tries (~25us); failure is not reported.
		 */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
318
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 *
 * If hardware auto-polling of the PHY is active, it is suspended for
 * the duration of the manual transaction and restored on exit so the
 * two cannot race on the MDIO bus.
 *
 * Returns 0 on success, or -EBUSY (with *@val set to 0) if the
 * transaction does not complete within the 50 x 10us polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Suspend auto-polling; the read-back flushes the
		 * write before the settling delay.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the read: PHY address, register, command, start/busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			/* NOTE(review): the extra delay and second read
			 * after busy clears look like a hardware
			 * workaround -- confirm before simplifying.
			 */
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* If busy never cleared, val1 still carries START_BUSY. */
	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
375
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * As in bnx2_read_phy(), hardware auto-polling is suspended around the
 * manual transaction and restored before returning.
 *
 * Returns 0 on success, or -EBUSY if the transaction does not complete
 * within the 50 x 10us polling window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Suspend hardware auto-polling of the PHY. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the write: PHY address, register, data, start/busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
424
425 static void
426 bnx2_disable_int(struct bnx2 *bp)
427 {
428         int i;
429         struct bnx2_napi *bnapi;
430
431         for (i = 0; i < bp->irq_nvecs; i++) {
432                 bnapi = &bp->bnx2_napi[i];
433                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
434                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
435         }
436         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
437 }
438
439 static void
440 bnx2_enable_int(struct bnx2 *bp)
441 {
442         int i;
443         struct bnx2_napi *bnapi;
444
445         for (i = 0; i < bp->irq_nvecs; i++) {
446                 bnapi = &bp->bnx2_napi[i];
447
448                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
449                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
450                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
451                        bnapi->last_status_idx);
452
453                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
454                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
455                        bnapi->last_status_idx);
456         }
457         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
458 }
459
460 static void
461 bnx2_disable_int_sync(struct bnx2 *bp)
462 {
463         int i;
464
465         atomic_inc(&bp->intr_sem);
466         bnx2_disable_int(bp);
467         for (i = 0; i < bp->irq_nvecs; i++)
468                 synchronize_irq(bp->irq_tbl[i].vector);
469 }
470
471 static void
472 bnx2_napi_disable(struct bnx2 *bp)
473 {
474         int i;
475
476         for (i = 0; i < bp->irq_nvecs; i++)
477                 napi_disable(&bp->bnx2_napi[i].napi);
478 }
479
480 static void
481 bnx2_napi_enable(struct bnx2 *bp)
482 {
483         int i;
484
485         for (i = 0; i < bp->irq_nvecs; i++)
486                 napi_enable(&bp->bnx2_napi[i].napi);
487 }
488
489 static void
490 bnx2_netif_stop(struct bnx2 *bp)
491 {
492         bnx2_disable_int_sync(bp);
493         if (netif_running(bp->dev)) {
494                 bnx2_napi_disable(bp);
495                 netif_tx_disable(bp->dev);
496                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
497         }
498 }
499
500 static void
501 bnx2_netif_start(struct bnx2 *bp)
502 {
503         if (atomic_dec_and_test(&bp->intr_sem)) {
504                 if (netif_running(bp->dev)) {
505                         netif_tx_wake_all_queues(bp->dev);
506                         bnx2_napi_enable(bp);
507                         bnx2_enable_int(bp);
508                 }
509         }
510 }
511
512 static void
513 bnx2_free_tx_mem(struct bnx2 *bp)
514 {
515         int i;
516
517         for (i = 0; i < bp->num_tx_rings; i++) {
518                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
519                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
520
521                 if (txr->tx_desc_ring) {
522                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
523                                             txr->tx_desc_ring,
524                                             txr->tx_desc_mapping);
525                         txr->tx_desc_ring = NULL;
526                 }
527                 kfree(txr->tx_buf_ring);
528                 txr->tx_buf_ring = NULL;
529         }
530 }
531
532 static void
533 bnx2_free_rx_mem(struct bnx2 *bp)
534 {
535         int i;
536
537         for (i = 0; i < bp->num_rx_rings; i++) {
538                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
539                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
540                 int j;
541
542                 for (j = 0; j < bp->rx_max_ring; j++) {
543                         if (rxr->rx_desc_ring[j])
544                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
545                                                     rxr->rx_desc_ring[j],
546                                                     rxr->rx_desc_mapping[j]);
547                         rxr->rx_desc_ring[j] = NULL;
548                 }
549                 vfree(rxr->rx_buf_ring);
550                 rxr->rx_buf_ring = NULL;
551
552                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
553                         if (rxr->rx_pg_desc_ring[j])
554                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
555                                                     rxr->rx_pg_desc_ring[j],
556                                                     rxr->rx_pg_desc_mapping[j]);
557                         rxr->rx_pg_desc_ring[j] = NULL;
558                 }
559                 vfree(rxr->rx_pg_ring);
560                 rxr->rx_pg_ring = NULL;
561         }
562 }
563
564 static int
565 bnx2_alloc_tx_mem(struct bnx2 *bp)
566 {
567         int i;
568
569         for (i = 0; i < bp->num_tx_rings; i++) {
570                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
571                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
572
573                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
574                 if (txr->tx_buf_ring == NULL)
575                         return -ENOMEM;
576
577                 txr->tx_desc_ring =
578                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
579                                              &txr->tx_desc_mapping);
580                 if (txr->tx_desc_ring == NULL)
581                         return -ENOMEM;
582         }
583         return 0;
584 }
585
586 static int
587 bnx2_alloc_rx_mem(struct bnx2 *bp)
588 {
589         int i;
590
591         for (i = 0; i < bp->num_rx_rings; i++) {
592                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
593                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
594                 int j;
595
596                 rxr->rx_buf_ring =
597                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
598                 if (rxr->rx_buf_ring == NULL)
599                         return -ENOMEM;
600
601                 memset(rxr->rx_buf_ring, 0,
602                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
603
604                 for (j = 0; j < bp->rx_max_ring; j++) {
605                         rxr->rx_desc_ring[j] =
606                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
607                                                      &rxr->rx_desc_mapping[j]);
608                         if (rxr->rx_desc_ring[j] == NULL)
609                                 return -ENOMEM;
610
611                 }
612
613                 if (bp->rx_pg_ring_size) {
614                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
615                                                   bp->rx_max_pg_ring);
616                         if (rxr->rx_pg_ring == NULL)
617                                 return -ENOMEM;
618
619                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
620                                bp->rx_max_pg_ring);
621                 }
622
623                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
624                         rxr->rx_pg_desc_ring[j] =
625                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
626                                                 &rxr->rx_pg_desc_mapping[j]);
627                         if (rxr->rx_pg_desc_ring[j] == NULL)
628                                 return -ENOMEM;
629
630                 }
631         }
632         return 0;
633 }
634
635 static void
636 bnx2_free_mem(struct bnx2 *bp)
637 {
638         int i;
639         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
640
641         bnx2_free_tx_mem(bp);
642         bnx2_free_rx_mem(bp);
643
644         for (i = 0; i < bp->ctx_pages; i++) {
645                 if (bp->ctx_blk[i]) {
646                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
647                                             bp->ctx_blk[i],
648                                             bp->ctx_blk_mapping[i]);
649                         bp->ctx_blk[i] = NULL;
650                 }
651         }
652         if (bnapi->status_blk.msi) {
653                 pci_free_consistent(bp->pdev, bp->status_stats_size,
654                                     bnapi->status_blk.msi,
655                                     bp->status_blk_mapping);
656                 bnapi->status_blk.msi = NULL;
657                 bp->stats_blk = NULL;
658         }
659 }
660
/* Allocate all driver DMA memory: the combined status/statistics block,
 * the 5709 context pages, and the RX/TX rings.
 *
 * Layout of the combined allocation: status block(s) first -- one
 * aligned slot per hardware vector when MSI-X is supported -- with the
 * statistics block placed immediately after.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned status slot per hardware MSI-X vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base (MSI-style) status block layout. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Each additional vector gets its own slot. */
			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives right after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB of host context memory, page by page. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
737
/* Translate the current link state into BNX2_LINK_STATUS_* bits and
 * publish it to the firmware through shared memory.
 *
 * Skipped when the PHY is remotely managed (REMOTE_PHY_CAP); in that
 * configuration the firmware tracks link state itself.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice; presumably because its
			 * status bits are latched so only the second
			 * read reflects current state (standard MII
			 * behavior) -- TODO confirm.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
796
797 static char *
798 bnx2_xceiver_str(struct bnx2 *bp)
799 {
800         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
801                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
802                  "Copper"));
803 }
804
805 static void
806 bnx2_report_link(struct bnx2 *bp)
807 {
808         if (bp->link_up) {
809                 netif_carrier_on(bp->dev);
810                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
811                        bnx2_xceiver_str(bp));
812
813                 printk("%d Mbps ", bp->line_speed);
814
815                 if (bp->duplex == DUPLEX_FULL)
816                         printk("full duplex");
817                 else
818                         printk("half duplex");
819
820                 if (bp->flow_ctrl) {
821                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
822                                 printk(", receive ");
823                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
824                                         printk("& transmit ");
825                         }
826                         else {
827                                 printk(", transmit ");
828                         }
829                         printk("flow control ON");
830                 }
831                 printk("\n");
832         }
833         else {
834                 netif_carrier_off(bp->dev);
835                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
836                        bnx2_xceiver_str(bp));
837         }
838
839         bnx2_report_fw_link(bp);
840 }
841
842 static void
843 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
844 {
845         u32 local_adv, remote_adv;
846
847         bp->flow_ctrl = 0;
848         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
849                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
850
851                 if (bp->duplex == DUPLEX_FULL) {
852                         bp->flow_ctrl = bp->req_flow_ctrl;
853                 }
854                 return;
855         }
856
857         if (bp->duplex != DUPLEX_FULL) {
858                 return;
859         }
860
861         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
862             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
863                 u32 val;
864
865                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
866                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
867                         bp->flow_ctrl |= FLOW_CTRL_TX;
868                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
869                         bp->flow_ctrl |= FLOW_CTRL_RX;
870                 return;
871         }
872
873         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
874         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
875
876         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
877                 u32 new_local_adv = 0;
878                 u32 new_remote_adv = 0;
879
880                 if (local_adv & ADVERTISE_1000XPAUSE)
881                         new_local_adv |= ADVERTISE_PAUSE_CAP;
882                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
883                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
884                 if (remote_adv & ADVERTISE_1000XPAUSE)
885                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
886                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
887                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
888
889                 local_adv = new_local_adv;
890                 remote_adv = new_remote_adv;
891         }
892
893         /* See Table 28B-3 of 802.3ab-1999 spec. */
894         if (local_adv & ADVERTISE_PAUSE_CAP) {
895                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
896                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
897                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
898                         }
899                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
900                                 bp->flow_ctrl = FLOW_CTRL_RX;
901                         }
902                 }
903                 else {
904                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
905                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
906                         }
907                 }
908         }
909         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
910                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
911                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
912
913                         bp->flow_ctrl = FLOW_CTRL_TX;
914                 }
915         }
916 }
917
918 static int
919 bnx2_5709s_linkup(struct bnx2 *bp)
920 {
921         u32 val, speed;
922
923         bp->link_up = 1;
924
925         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
926         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
927         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
928
929         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
930                 bp->line_speed = bp->req_line_speed;
931                 bp->duplex = bp->req_duplex;
932                 return 0;
933         }
934         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
935         switch (speed) {
936                 case MII_BNX2_GP_TOP_AN_SPEED_10:
937                         bp->line_speed = SPEED_10;
938                         break;
939                 case MII_BNX2_GP_TOP_AN_SPEED_100:
940                         bp->line_speed = SPEED_100;
941                         break;
942                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
943                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
944                         bp->line_speed = SPEED_1000;
945                         break;
946                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
947                         bp->line_speed = SPEED_2500;
948                         break;
949         }
950         if (val & MII_BNX2_GP_TOP_AN_FD)
951                 bp->duplex = DUPLEX_FULL;
952         else
953                 bp->duplex = DUPLEX_HALF;
954         return 0;
955 }
956
957 static int
958 bnx2_5708s_linkup(struct bnx2 *bp)
959 {
960         u32 val;
961
962         bp->link_up = 1;
963         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
964         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
965                 case BCM5708S_1000X_STAT1_SPEED_10:
966                         bp->line_speed = SPEED_10;
967                         break;
968                 case BCM5708S_1000X_STAT1_SPEED_100:
969                         bp->line_speed = SPEED_100;
970                         break;
971                 case BCM5708S_1000X_STAT1_SPEED_1G:
972                         bp->line_speed = SPEED_1000;
973                         break;
974                 case BCM5708S_1000X_STAT1_SPEED_2G5:
975                         bp->line_speed = SPEED_2500;
976                         break;
977         }
978         if (val & BCM5708S_1000X_STAT1_FD)
979                 bp->duplex = DUPLEX_FULL;
980         else
981                 bp->duplex = DUPLEX_HALF;
982
983         return 0;
984 }
985
986 static int
987 bnx2_5706s_linkup(struct bnx2 *bp)
988 {
989         u32 bmcr, local_adv, remote_adv, common;
990
991         bp->link_up = 1;
992         bp->line_speed = SPEED_1000;
993
994         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
995         if (bmcr & BMCR_FULLDPLX) {
996                 bp->duplex = DUPLEX_FULL;
997         }
998         else {
999                 bp->duplex = DUPLEX_HALF;
1000         }
1001
1002         if (!(bmcr & BMCR_ANENABLE)) {
1003                 return 0;
1004         }
1005
1006         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1007         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1008
1009         common = local_adv & remote_adv;
1010         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1011
1012                 if (common & ADVERTISE_1000XFULL) {
1013                         bp->duplex = DUPLEX_FULL;
1014                 }
1015                 else {
1016                         bp->duplex = DUPLEX_HALF;
1017                 }
1018         }
1019
1020         return 0;
1021 }
1022
1023 static int
1024 bnx2_copper_linkup(struct bnx2 *bp)
1025 {
1026         u32 bmcr;
1027
1028         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1029         if (bmcr & BMCR_ANENABLE) {
1030                 u32 local_adv, remote_adv, common;
1031
1032                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1033                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1034
1035                 common = local_adv & (remote_adv >> 2);
1036                 if (common & ADVERTISE_1000FULL) {
1037                         bp->line_speed = SPEED_1000;
1038                         bp->duplex = DUPLEX_FULL;
1039                 }
1040                 else if (common & ADVERTISE_1000HALF) {
1041                         bp->line_speed = SPEED_1000;
1042                         bp->duplex = DUPLEX_HALF;
1043                 }
1044                 else {
1045                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1046                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1047
1048                         common = local_adv & remote_adv;
1049                         if (common & ADVERTISE_100FULL) {
1050                                 bp->line_speed = SPEED_100;
1051                                 bp->duplex = DUPLEX_FULL;
1052                         }
1053                         else if (common & ADVERTISE_100HALF) {
1054                                 bp->line_speed = SPEED_100;
1055                                 bp->duplex = DUPLEX_HALF;
1056                         }
1057                         else if (common & ADVERTISE_10FULL) {
1058                                 bp->line_speed = SPEED_10;
1059                                 bp->duplex = DUPLEX_FULL;
1060                         }
1061                         else if (common & ADVERTISE_10HALF) {
1062                                 bp->line_speed = SPEED_10;
1063                                 bp->duplex = DUPLEX_HALF;
1064                         }
1065                         else {
1066                                 bp->line_speed = 0;
1067                                 bp->link_up = 0;
1068                         }
1069                 }
1070         }
1071         else {
1072                 if (bmcr & BMCR_SPEED100) {
1073                         bp->line_speed = SPEED_100;
1074                 }
1075                 else {
1076                         bp->line_speed = SPEED_10;
1077                 }
1078                 if (bmcr & BMCR_FULLDPLX) {
1079                         bp->duplex = DUPLEX_FULL;
1080                 }
1081                 else {
1082                         bp->duplex = DUPLEX_HALF;
1083                 }
1084         }
1085
1086         return 0;
1087 }
1088
1089 static void
1090 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1091 {
1092         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1093
1094         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1095         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1096         val |= 0x02 << 8;
1097
1098         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1099                 u32 lo_water, hi_water;
1100
1101                 if (bp->flow_ctrl & FLOW_CTRL_TX)
1102                         lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1103                 else
1104                         lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1105                 if (lo_water >= bp->rx_ring_size)
1106                         lo_water = 0;
1107
1108                 hi_water = bp->rx_ring_size / 4;
1109
1110                 if (hi_water <= lo_water)
1111                         lo_water = 0;
1112
1113                 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1114                 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1115
1116                 if (hi_water > 0xf)
1117                         hi_water = 0xf;
1118                 else if (hi_water == 0)
1119                         lo_water = 0;
1120                 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1121         }
1122         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1123 }
1124
1125 static void
1126 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1127 {
1128         int i;
1129         u32 cid;
1130
1131         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1132                 if (i == 1)
1133                         cid = RX_RSS_CID;
1134                 bnx2_init_rx_context(bp, cid);
1135         }
1136 }
1137
/* Program the EMAC to match the link state previously resolved into
 * bp->link_up / line_speed / duplex / flow_ctrl: inter-packet gap,
 * port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.
 * NOTE(review): callers appear to hold bp->phy_lock -- confirm.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap; widened below for 1G half duplex. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; it uses
				 * plain MII (the fall-through below).
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 RX contexts embed flow-control watermarks, so they must
	 * be reprogrammed whenever flow control changes.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1205
1206 static void
1207 bnx2_enable_bmsr1(struct bnx2 *bp)
1208 {
1209         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1210             (CHIP_NUM(bp) == CHIP_NUM_5709))
1211                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1212                                MII_BNX2_BLK_ADDR_GP_STATUS);
1213 }
1214
1215 static void
1216 bnx2_disable_bmsr1(struct bnx2 *bp)
1217 {
1218         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1219             (CHIP_NUM(bp) == CHIP_NUM_5709))
1220                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1221                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1222 }
1223
1224 static int
1225 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1226 {
1227         u32 up1;
1228         int ret = 1;
1229
1230         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1231                 return 0;
1232
1233         if (bp->autoneg & AUTONEG_SPEED)
1234                 bp->advertising |= ADVERTISED_2500baseX_Full;
1235
1236         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1237                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1238
1239         bnx2_read_phy(bp, bp->mii_up1, &up1);
1240         if (!(up1 & BCM5708S_UP1_2G5)) {
1241                 up1 |= BCM5708S_UP1_2G5;
1242                 bnx2_write_phy(bp, bp->mii_up1, up1);
1243                 ret = 0;
1244         }
1245
1246         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1247                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1248                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1249
1250         return ret;
1251 }
1252
1253 static int
1254 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1255 {
1256         u32 up1;
1257         int ret = 0;
1258
1259         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1260                 return 0;
1261
1262         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1263                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1264
1265         bnx2_read_phy(bp, bp->mii_up1, &up1);
1266         if (up1 & BCM5708S_UP1_2G5) {
1267                 up1 &= ~BCM5708S_UP1_2G5;
1268                 bnx2_write_phy(bp, bp->mii_up1, up1);
1269                 ret = 1;
1270         }
1271
1272         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1273                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1274                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1275
1276         return ret;
1277 }
1278
1279 static void
1280 bnx2_enable_forced_2g5(struct bnx2 *bp)
1281 {
1282         u32 bmcr;
1283
1284         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1285                 return;
1286
1287         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1288                 u32 val;
1289
1290                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1291                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1292                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1293                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1294                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1295                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1296
1297                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1298                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1299                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1300
1301         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1302                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1303                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1304         }
1305
1306         if (bp->autoneg & AUTONEG_SPEED) {
1307                 bmcr &= ~BMCR_ANENABLE;
1308                 if (bp->req_duplex == DUPLEX_FULL)
1309                         bmcr |= BMCR_FULLDPLX;
1310         }
1311         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1312 }
1313
1314 static void
1315 bnx2_disable_forced_2g5(struct bnx2 *bp)
1316 {
1317         u32 bmcr;
1318
1319         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1320                 return;
1321
1322         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1323                 u32 val;
1324
1325                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1326                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1327                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1328                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1329                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1330
1331                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1332                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1333                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1334
1335         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1336                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1337                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1338         }
1339
1340         if (bp->autoneg & AUTONEG_SPEED)
1341                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1342         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1343 }
1344
1345 static void
1346 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1347 {
1348         u32 val;
1349
1350         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1351         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1352         if (start)
1353                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1354         else
1355                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1356 }
1357
/* Poll the PHY and synchronize driver and MAC state with the current
 * link: resolve speed/duplex/flow control on link up, re-enable
 * autoneg on link loss, report transitions, and reprogram the EMAC.
 * NOTE(review): callers appear to hold bp->phy_lock -- confirm.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered always up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY devices have their link managed by the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: the link-status bit is latched-low, so the
	 * second read returns the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes: override BMSR link status using the EMAC
		 * status register and the AN debug shadow register.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Double read of the shadow register, like BMSR above. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leave parallel-detect mode and re-enable autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Log only on link-state transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1441
1442 static int
1443 bnx2_reset_phy(struct bnx2 *bp)
1444 {
1445         int i;
1446         u32 reg;
1447
1448         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1449
1450 #define PHY_RESET_MAX_WAIT 100
1451         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1452                 udelay(10);
1453
1454                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1455                 if (!(reg & BMCR_RESET)) {
1456                         udelay(20);
1457                         break;
1458                 }
1459         }
1460         if (i == PHY_RESET_MAX_WAIT) {
1461                 return -EBUSY;
1462         }
1463         return 0;
1464 }
1465
1466 static u32
1467 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1468 {
1469         u32 adv = 0;
1470
1471         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1472                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1473
1474                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1475                         adv = ADVERTISE_1000XPAUSE;
1476                 }
1477                 else {
1478                         adv = ADVERTISE_PAUSE_CAP;
1479                 }
1480         }
1481         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1482                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1483                         adv = ADVERTISE_1000XPSE_ASYM;
1484                 }
1485                 else {
1486                         adv = ADVERTISE_PAUSE_ASYM;
1487                 }
1488         }
1489         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1490                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1491                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1492                 }
1493                 else {
1494                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1495                 }
1496         }
1497         return adv;
1498 }
1499
1500 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1501
1502 static int
1503 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1504 __releases(&bp->phy_lock)
1505 __acquires(&bp->phy_lock)
1506 {
1507         u32 speed_arg = 0, pause_adv;
1508
1509         pause_adv = bnx2_phy_get_pause_adv(bp);
1510
1511         if (bp->autoneg & AUTONEG_SPEED) {
1512                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1513                 if (bp->advertising & ADVERTISED_10baseT_Half)
1514                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1515                 if (bp->advertising & ADVERTISED_10baseT_Full)
1516                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1517                 if (bp->advertising & ADVERTISED_100baseT_Half)
1518                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1519                 if (bp->advertising & ADVERTISED_100baseT_Full)
1520                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1521                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1522                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1523                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1524                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1525         } else {
1526                 if (bp->req_line_speed == SPEED_2500)
1527                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1528                 else if (bp->req_line_speed == SPEED_1000)
1529                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1530                 else if (bp->req_line_speed == SPEED_100) {
1531                         if (bp->req_duplex == DUPLEX_FULL)
1532                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1533                         else
1534                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1535                 } else if (bp->req_line_speed == SPEED_10) {
1536                         if (bp->req_duplex == DUPLEX_FULL)
1537                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1538                         else
1539                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1540                 }
1541         }
1542
1543         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1544                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1545         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1546                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1547
1548         if (port == PORT_TP)
1549                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1550                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1551
1552         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1553
1554         spin_unlock_bh(&bp->phy_lock);
1555         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1556         spin_lock_bh(&bp->phy_lock);
1557
1558         return 0;
1559 }
1560
1561 static int
1562 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1563 __releases(&bp->phy_lock)
1564 __acquires(&bp->phy_lock)
1565 {
1566         u32 adv, bmcr;
1567         u32 new_adv = 0;
1568
1569         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1570                 return (bnx2_setup_remote_phy(bp, port));
1571
1572         if (!(bp->autoneg & AUTONEG_SPEED)) {
1573                 u32 new_bmcr;
1574                 int force_link_down = 0;
1575
1576                 if (bp->req_line_speed == SPEED_2500) {
1577                         if (!bnx2_test_and_enable_2g5(bp))
1578                                 force_link_down = 1;
1579                 } else if (bp->req_line_speed == SPEED_1000) {
1580                         if (bnx2_test_and_disable_2g5(bp))
1581                                 force_link_down = 1;
1582                 }
1583                 bnx2_read_phy(bp, bp->mii_adv, &adv);
1584                 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1585
1586                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1587                 new_bmcr = bmcr & ~BMCR_ANENABLE;
1588                 new_bmcr |= BMCR_SPEED1000;
1589
1590                 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1591                         if (bp->req_line_speed == SPEED_2500)
1592                                 bnx2_enable_forced_2g5(bp);
1593                         else if (bp->req_line_speed == SPEED_1000) {
1594                                 bnx2_disable_forced_2g5(bp);
1595                                 new_bmcr &= ~0x2000;
1596                         }
1597
1598                 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1599                         if (bp->req_line_speed == SPEED_2500)
1600                                 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1601                         else
1602                                 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1603                 }
1604
1605                 if (bp->req_duplex == DUPLEX_FULL) {
1606                         adv |= ADVERTISE_1000XFULL;
1607                         new_bmcr |= BMCR_FULLDPLX;
1608                 }
1609                 else {
1610                         adv |= ADVERTISE_1000XHALF;
1611                         new_bmcr &= ~BMCR_FULLDPLX;
1612                 }
1613                 if ((new_bmcr != bmcr) || (force_link_down)) {
1614                         /* Force a link down visible on the other side */
1615                         if (bp->link_up) {
1616                                 bnx2_write_phy(bp, bp->mii_adv, adv &
1617                                                ~(ADVERTISE_1000XFULL |
1618                                                  ADVERTISE_1000XHALF));
1619                                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1620                                         BMCR_ANRESTART | BMCR_ANENABLE);
1621
1622                                 bp->link_up = 0;
1623                                 netif_carrier_off(bp->dev);
1624                                 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1625                                 bnx2_report_link(bp);
1626                         }
1627                         bnx2_write_phy(bp, bp->mii_adv, adv);
1628                         bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1629                 } else {
1630                         bnx2_resolve_flow_ctrl(bp);
1631                         bnx2_set_mac_link(bp);
1632                 }
1633                 return 0;
1634         }
1635
1636         bnx2_test_and_enable_2g5(bp);
1637
1638         if (bp->advertising & ADVERTISED_1000baseT_Full)
1639                 new_adv |= ADVERTISE_1000XFULL;
1640
1641         new_adv |= bnx2_phy_get_pause_adv(bp);
1642
1643         bnx2_read_phy(bp, bp->mii_adv, &adv);
1644         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1645
1646         bp->serdes_an_pending = 0;
1647         if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1648                 /* Force a link down visible on the other side */
1649                 if (bp->link_up) {
1650                         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1651                         spin_unlock_bh(&bp->phy_lock);
1652                         msleep(20);
1653                         spin_lock_bh(&bp->phy_lock);
1654                 }
1655
1656                 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1657                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1658                         BMCR_ANENABLE);
1659                 /* Speed up link-up time when the link partner
1660                  * does not autonegotiate which is very common
1661                  * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
1663                  * to minimize link disruptions. Autoneg. involves
1664                  * exchanging base pages plus 3 next pages and
1665                  * normally completes in about 120 msec.
1666                  */
1667                 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1668                 bp->serdes_an_pending = 1;
1669                 mod_timer(&bp->timer, jiffies + bp->current_interval);
1670         } else {
1671                 bnx2_resolve_flow_ctrl(bp);
1672                 bnx2_set_mac_link(bp);
1673         }
1674
1675         return 0;
1676 }
1677
/* All fibre speeds the ethtool interface may advertise; 2.5G is
 * included only when the PHY reports 2.5G capability.  Expands in a
 * scope where a local "bp" is visible.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* All copper speed/duplex combinations the ethtool interface may
 * advertise. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for every 10/100 mode plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both 1G duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1692
1693 static void
1694 bnx2_set_default_remote_link(struct bnx2 *bp)
1695 {
1696         u32 link;
1697
1698         if (bp->phy_port == PORT_TP)
1699                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1700         else
1701                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1702
1703         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1704                 bp->req_line_speed = 0;
1705                 bp->autoneg |= AUTONEG_SPEED;
1706                 bp->advertising = ADVERTISED_Autoneg;
1707                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1708                         bp->advertising |= ADVERTISED_10baseT_Half;
1709                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1710                         bp->advertising |= ADVERTISED_10baseT_Full;
1711                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1712                         bp->advertising |= ADVERTISED_100baseT_Half;
1713                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1714                         bp->advertising |= ADVERTISED_100baseT_Full;
1715                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1716                         bp->advertising |= ADVERTISED_1000baseT_Full;
1717                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1718                         bp->advertising |= ADVERTISED_2500baseX_Full;
1719         } else {
1720                 bp->autoneg = 0;
1721                 bp->advertising = 0;
1722                 bp->req_duplex = DUPLEX_FULL;
1723                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1724                         bp->req_line_speed = SPEED_10;
1725                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1726                                 bp->req_duplex = DUPLEX_HALF;
1727                 }
1728                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1729                         bp->req_line_speed = SPEED_100;
1730                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1731                                 bp->req_duplex = DUPLEX_HALF;
1732                 }
1733                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1734                         bp->req_line_speed = SPEED_1000;
1735                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1736                         bp->req_line_speed = SPEED_2500;
1737         }
1738 }
1739
1740 static void
1741 bnx2_set_default_link(struct bnx2 *bp)
1742 {
1743         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1744                 bnx2_set_default_remote_link(bp);
1745                 return;
1746         }
1747
1748         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1749         bp->req_line_speed = 0;
1750         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1751                 u32 reg;
1752
1753                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1754
1755                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1756                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1757                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1758                         bp->autoneg = 0;
1759                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1760                         bp->req_duplex = DUPLEX_FULL;
1761                 }
1762         } else
1763                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1764 }
1765
1766 static void
1767 bnx2_send_heart_beat(struct bnx2 *bp)
1768 {
1769         u32 msg;
1770         u32 addr;
1771
1772         spin_lock(&bp->indirect_lock);
1773         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1774         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1775         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1776         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1777         spin_unlock(&bp->indirect_lock);
1778 }
1779
/* Handle a link event reported by the remote-PHY firmware.  Decodes
 * the BNX2_LINK_STATUS word from shared memory into bp->link_up,
 * bp->line_speed, bp->duplex, bp->flow_ctrl and bp->phy_port, then
 * reprograms the MAC and reports any link state transition.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;       /* previous state, to detect change */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        /* Firmware sets this bit when it wants a fresh driver pulse. */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each xxHALF case sets half duplex, then falls through
                 * to the matching xxFULL case to pick up the line speed.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                /* Flow control: forced settings apply only on full
                 * duplex; otherwise take what firmware negotiated.
                 */
                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* A media change (fibre <-> TP) invalidates the current
                 * defaults; recompute them.
                 */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
1856
1857 static int
1858 bnx2_set_remote_link(struct bnx2 *bp)
1859 {
1860         u32 evt_code;
1861
1862         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1863         switch (evt_code) {
1864                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1865                         bnx2_remote_phy_event(bp);
1866                         break;
1867                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1868                 default:
1869                         bnx2_send_heart_beat(bp);
1870                         break;
1871         }
1872         return 0;
1873 }
1874
/* Program the copper PHY according to bp->autoneg and bp->req_*
 * settings.  Called with phy_lock held; temporarily drops it around
 * an msleep() when the link must be forced down first.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Mask the current advertisement down to the bits we
                 * manage so the old/new comparison below is meaningful.
                 */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Translate ethtool advertisement bits into MII bits. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Restart autoneg only if the advertisement changed or
                 * autoneg is currently disabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* MII BMSR latches link-down events; read twice so the
                 * second read reflects the current link state.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
1973
1974 static int
1975 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1976 __releases(&bp->phy_lock)
1977 __acquires(&bp->phy_lock)
1978 {
1979         if (bp->loopback == MAC_LOOPBACK)
1980                 return 0;
1981
1982         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1983                 return (bnx2_setup_serdes_phy(bp, port));
1984         }
1985         else {
1986                 return (bnx2_setup_copper_phy(bp));
1987         }
1988 }
1989
/* Initialize the 5709 SerDes PHY.  Its standard MII registers sit at
 * an offset of 0x10 and extended registers are reached through paged
 * blocks selected via MII_BNX2_BLK_ADDR.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        /* Standard MII registers are offset by 0x10 on this PHY. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Point the AER block at the autoneg MMD. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        /* Force fiber mode instead of media auto-detection. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Enable 2.5G in the over-1G block only on capable PHYs. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable BAM next-page exchange. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the block address pointing at the IEEE combo block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2039
/* Initialize the BCM5708 SerDes PHY: select the IEEE register set,
 * enable fiber mode with auto-detect, optionally enable 2.5G, and
 * apply chip-revision and board-specific TX tuning.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        /* Use the IEEE register mapping. */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        /* Enable 2.5G mode only on capable PHYs. */
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* NVRAM may supply a board-specific TX control value; apply it
         * only on backplane designs.
         */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2097
/* Initialize the BCM5706 SerDes PHY.  The 0x18/0x1c accesses are
 * vendor shadow registers adjusting packet-length handling for jumbo
 * vs. standard MTU; bit meanings are not documented here.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        /* NOTE(review): purpose of the 0x300 write is not visible
         * here - confirm against chip documentation.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Clear the extended packet length bit. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2135
/* Initialize the copper (TP) PHY: apply the CRC and early-DAC
 * workarounds when flagged, set or clear the extended packet length
 * bit based on MTU, and enable ethernet@wirespeed.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                /* Vendor-prescribed shadow-register sequence for the
                 * CRC workaround; register meanings not documented here.
                 */
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                /* Clear bit 8 in DSP expansion register 8 to disable
                 * early DAC.
                 */
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear the extended packet length bit. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2187
2188
2189 static int
2190 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2191 __releases(&bp->phy_lock)
2192 __acquires(&bp->phy_lock)
2193 {
2194         u32 val;
2195         int rc = 0;
2196
2197         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2198         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2199
2200         bp->mii_bmcr = MII_BMCR;
2201         bp->mii_bmsr = MII_BMSR;
2202         bp->mii_bmsr1 = MII_BMSR;
2203         bp->mii_adv = MII_ADVERTISE;
2204         bp->mii_lpa = MII_LPA;
2205
2206         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2207
2208         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2209                 goto setup_phy;
2210
2211         bnx2_read_phy(bp, MII_PHYSID1, &val);
2212         bp->phy_id = val << 16;
2213         bnx2_read_phy(bp, MII_PHYSID2, &val);
2214         bp->phy_id |= val & 0xffff;
2215
2216         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2217                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2218                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2219                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2220                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2221                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2222                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2223         }
2224         else {
2225                 rc = bnx2_init_copper_phy(bp, reset_phy);
2226         }
2227
2228 setup_phy:
2229         if (!rc)
2230                 rc = bnx2_setup_phy(bp, bp->phy_port);
2231
2232         return rc;
2233 }
2234
2235 static int
2236 bnx2_set_mac_loopback(struct bnx2 *bp)
2237 {
2238         u32 mac_mode;
2239
2240         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2241         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2242         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2243         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2244         bp->link_up = 1;
2245         return 0;
2246 }
2247
2248 static int bnx2_test_link(struct bnx2 *);
2249
2250 static int
2251 bnx2_set_phy_loopback(struct bnx2 *bp)
2252 {
2253         u32 mac_mode;
2254         int rc, i;
2255
2256         spin_lock_bh(&bp->phy_lock);
2257         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2258                             BMCR_SPEED1000);
2259         spin_unlock_bh(&bp->phy_lock);
2260         if (rc)
2261                 return rc;
2262
2263         for (i = 0; i < 10; i++) {
2264                 if (bnx2_test_link(bp) == 0)
2265                         break;
2266                 msleep(100);
2267         }
2268
2269         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2270         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2271                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2272                       BNX2_EMAC_MODE_25G_MODE);
2273
2274         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2275         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2276         bp->link_up = 1;
2277         return 0;
2278 }
2279
/* Post @msg_data to the bootcode mailbox, tagged with an incrementing
 * sequence number.  When @ack is set, poll FW_MB for up to
 * BNX2_FW_ACK_TIME_OUT_MS for the firmware to echo the sequence back.
 * Returns 0 on success (also when no ack was requested or for WAIT0
 * messages), -EBUSY on ack timeout, -EIO when the firmware reports a
 * non-OK status.  @silent suppresses the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        /* Tag the message with a fresh sequence number. */
        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages succeed without checking the ack/status. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2325
/* Set up the 5709's host-memory-backed context: run the one-time
 * hardware MEM_INIT, then load each host context page into the chip's
 * page table, polling for every write to complete.
 * Returns 0 on success, -EBUSY on a hardware timeout, or -ENOMEM if a
 * context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* NOTE(review): bit 12 is set alongside ENABLED/MEM_INIT; its
         * meaning is not visible here - confirm against chip docs.
         */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;       /* page-size encoding */
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* Wait for the memory init to self-clear. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Program the page's DMA address (lo then hi) and issue
                 * the page-table write request.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* Poll for the write request to be consumed. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2373
/* Zero the on-chip context memory for all 96 contexts (chips without
 * host-backed context, i.e. non-5709 parts).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        /* 5706 A0 remaps a subset of virtual CIDs onto
                         * different physical CIDs - presumably a silicon
                         * workaround; confirm against the A0 errata.
                         */
                        vcid_addr = GET_PCID_ADDR(vcid);
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* A context may span several physical pages; map and
                 * clear each one.
                 */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
2416
/* Work around bad RX buffer (mbuf) blocks in the chip's internal pool.
 * Drain the firmware free pool one mbuf at a time, recording the
 * addresses that do not carry the bad-block marker (bit 9), then free
 * only those good ones back.  The bad blocks remain allocated and are
 * therefore never handed to the hardware again.
 *
 * Returns 0 on success or -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* NOTE(review): the stores into good_mbuf[] below are not
	 * bounds-checked; this assumes the hardware free pool never
	 * yields more than 512 mbufs -- confirm against the chip spec.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	/* Enable the RX mbuf unit so allocation requests are serviced. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the free request: the address appears in both
		 * halves of the word, plus the valid bit (bit 0).
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2468
2469 static void
2470 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2471 {
2472         u32 val;
2473
2474         val = (mac_addr[0] << 8) | mac_addr[1];
2475
2476         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2477
2478         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2479                 (mac_addr[4] << 8) | mac_addr[5];
2480
2481         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2482 }
2483
2484 static inline int
2485 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2486 {
2487         dma_addr_t mapping;
2488         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2489         struct rx_bd *rxbd =
2490                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2491         struct page *page = alloc_page(GFP_ATOMIC);
2492
2493         if (!page)
2494                 return -ENOMEM;
2495         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2496                                PCI_DMA_FROMDEVICE);
2497         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2498                 __free_page(page);
2499                 return -EIO;
2500         }
2501
2502         rx_pg->page = page;
2503         pci_unmap_addr_set(rx_pg, mapping, mapping);
2504         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2505         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2506         return 0;
2507 }
2508
2509 static void
2510 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2511 {
2512         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2513         struct page *page = rx_pg->page;
2514
2515         if (!page)
2516                 return;
2517
2518         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2519                        PCI_DMA_FROMDEVICE);
2520
2521         __free_page(page);
2522         rx_pg->page = NULL;
2523 }
2524
2525 static inline int
2526 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2527 {
2528         struct sk_buff *skb;
2529         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2530         dma_addr_t mapping;
2531         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2532         unsigned long align;
2533
2534         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2535         if (skb == NULL) {
2536                 return -ENOMEM;
2537         }
2538
2539         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2540                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2541
2542         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2543                 PCI_DMA_FROMDEVICE);
2544         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2545                 dev_kfree_skb(skb);
2546                 return -EIO;
2547         }
2548
2549         rx_buf->skb = skb;
2550         pci_unmap_addr_set(rx_buf, mapping, mapping);
2551
2552         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2553         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2554
2555         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2556
2557         return 0;
2558 }
2559
2560 static int
2561 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2562 {
2563         struct status_block *sblk = bnapi->status_blk.msi;
2564         u32 new_link_state, old_link_state;
2565         int is_set = 1;
2566
2567         new_link_state = sblk->status_attn_bits & event;
2568         old_link_state = sblk->status_attn_bits_ack & event;
2569         if (new_link_state != old_link_state) {
2570                 if (new_link_state)
2571                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2572                 else
2573                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2574         } else
2575                 is_set = 0;
2576
2577         return is_set;
2578 }
2579
/* Service PHY attention events signalled in the status block.  Both
 * checks acknowledge their event as a side effect, so each is always
 * performed.  bp->phy_lock is held across the link updates.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	/* Link state toggled: recompute and apply the link settings. */
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	/* Timer-abort event: refresh the remote-PHY managed link state. */
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2593
/* Read the hardware TX consumer index from the status block.
 *
 * When the index lands exactly on MAX_TX_DESC_CNT (the last entry of a
 * ring page -- presumably the chain/link descriptor, which carries no
 * packet) it is bumped past it.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	/* Skip over the final per-page entry. */
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2607
/* Reclaim completed TX descriptors for the ring owned by @bnapi.
 *
 * Walks the software consumer forward to the hardware consumer,
 * unmapping and freeing each transmitted skb.  Stops after @budget
 * packets (a budget of 0 effectively disables the cap, since the
 * counter is compared for equality after incrementing).  Wakes the
 * netdev TX queue if it was stopped and enough descriptors are free.
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance; derive the queue index from
	 * the position of @bnapi in the bp->bnx2_napi array.
	 */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's final BD; account for the
			 * per-page chain entry if the frags wrap a page.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wraparound:
			 * if the last BD is still ahead of hw_cons, the
			 * packet is only partially completed -- stop here.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Consume one BD per fragment plus the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Caught up with hardware; re-read in case it moved on. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX lock to avoid racing with the xmit path. */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2690
/* Recycle @count RX pages from the consumer side of the page ring back
 * to the producer side without allocating new pages.  Used on error or
 * allocation-failure paths so the ring never loses buffers.
 *
 * If @skb is non-NULL, its last page frag is first detached and put
 * back into the ring's consumer slot, and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move page, DMA mapping and descriptor address from the
		 * consumer slot to the producer slot (no-op if they are
		 * the same slot).
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2746
/* Recycle the RX buffer at ring index @cons into the producer slot
 * @prod, reusing its skb and DMA mapping instead of allocating fresh
 * ones.  Used when a packet is dropped or was copied out of the buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header area (synced for the CPU in bnx2_rx_int())
	 * back to the device before the hardware reuses the buffer.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2776
/* Finish assembling a received packet into @skb.
 *
 * @len excludes the 4-byte frame trailer (presumably the FCS -- hence
 * the "+ 4" adjustments below when sizing page-resident data).
 * @hdr_len is non-zero for split/jumbo packets whose payload beyond the
 * header lives in the page ring.  @ring_idx packs the consumer index in
 * the upper 16 bits and the producer index in the lower 16 bits.
 *
 * The ring slot is replenished first; on failure the old buffer (and
 * any pages the packet occupies) are recycled and an error returned so
 * the ring never runs dry.  Returns 0 on success or a negative errno.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear packet: all data is in the skb head. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Payload beyond the header, including the 4-byte trailer,
		 * is spread over the page ring.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* The last page holds only (part of) the 4-byte
			 * trailer: trim it off the skb, recycle the
			 * remaining pages, and finish.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				/* Pass @skb so its last frag page is also
				 * recycled before the skb is freed.
				 */
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2875
/* Read the hardware RX consumer index from the status block.
 *
 * As with the TX variant, an index landing exactly on MAX_RX_DESC_CNT
 * (the last per-page entry, which carries no packet) is bumped past it.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	/* Skip over the final per-page entry. */
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
2889
/* RX completion loop: drain up to @budget received packets from the
 * ring owned by @bnapi and hand them to the network stack.
 *
 * For each completion: sync the header area for the CPU, parse the
 * l2_fhdr the chip prepends to the data, drop errored frames (recycling
 * their buffers), copy small frames into a fresh skb, assemble large or
 * split frames via bnx2_rx_skb(), apply VLAN and checksum handling, and
 * pass the skb up.  Finally the new producer indices are written back
 * to the hardware.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the header area is synced here; the full buffer is
		 * unmapped later if the packet is actually consumed.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Chip-written frame header precedes the packet data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/payload split: the split point is reported
			 * in the (re-used) ip_xsum field.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling the buffer and any pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte frame trailer from the reported length. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the large ring buffer.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group registered: the chip stripped
				 * the tag, so re-insert it into the frame.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they are VLAN tagged
		 * (0x8100 == ETH_P_8021Q).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when the frame parsed as
		 * TCP/UDP and no checksum errors were flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the hardware. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3065
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced (no shared-line
 * "is it ours?" check is needed).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts from this source until NAPI re-enables
	 * them on completion.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3088
3089 static irqreturn_t
3090 bnx2_msi_1shot(int irq, void *dev_instance)
3091 {
3092         struct bnx2_napi *bnapi = dev_instance;
3093         struct bnx2 *bp = bnapi->bp;
3094
3095         prefetch(bnapi->status_blk.msi);
3096
3097         /* Return here if interrupt is disabled. */
3098         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3099                 return IRQ_HANDLED;
3100
3101         napi_schedule(&bnapi->napi);
3102
3103         return IRQ_HANDLED;
3104 }
3105
/* INTx (legacy, possibly shared) interrupt handler.  Determines whether
 * the interrupt is ours, masks and deasserts it, and schedules NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are servicing only if NAPI was not
	 * already scheduled; otherwise the running poll owns it.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3144
3145 static inline int
3146 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3147 {
3148         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3149         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3150
3151         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3152             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3153                 return 1;
3154         return 0;
3155 }
3156
3157 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3158                                  STATUS_ATTN_BITS_TIMER_ABORT)
3159
3160 static inline int
3161 bnx2_has_work(struct bnx2_napi *bnapi)
3162 {
3163         struct status_block *sblk = bnapi->status_blk.msi;
3164
3165         if (bnx2_has_fast_work(bnapi))
3166                 return 1;
3167
3168         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3169             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3170                 return 1;
3171
3172         return 0;
3173 }
3174
/* Detect and recover from a lost MSI.  If there is pending work but the
 * status index has not advanced since the previous idle check, the MSI
 * was presumably missed: toggle MSI enable to re-arm it and invoke the
 * MSI handler by hand.  Called periodically (outside interrupt context)
 * from the driver's timer.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not in use. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Pulse MSI enable off/on, then service manually. */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3196
/* Check the status block for unacknowledged PHY attention events and,
 * if any are pending, run the PHY interrupt handling and force a
 * coalescing cycle so transient status during link changes is flushed.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Raw vs acked mismatch means an event is pending. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3216
3217 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3218                           int work_done, int budget)
3219 {
3220         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3221         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3222
3223         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3224                 bnx2_tx_int(bp, bnapi, 0);
3225
3226         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3227                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3228
3229         return work_done;
3230 }
3231
/* NAPI poll handler for MSI-X vectors.  Loops over TX/RX work until the
 * budget is exhausted or no fast work remains; on completion it acks
 * with the latest status index, which re-enables this vector's
 * interrupt.  Returns the number of RX packets processed.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack with the latest index to unmask the vector. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3258
/* NAPI poll handler for INTx/MSI.  In addition to TX/RX work it polls
 * for PHY attention events each iteration.  On completion the interrupt
 * is re-enabled: a single ack write suffices for MSI/MSI-X, while the
 * INTx path issues two writes (first with MASK_INT set, then without).
 * Returns the number of RX packets processed.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first update the index with interrupts still
			 * masked, then unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3303
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC receive mode (promiscuous / VLAN-tag stripping),
 * the 8 x 32-bit multicast hash filter, and the RPM sort-user rules
 * for broadcast, multicast and secondary unicast addresses.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock also serializes link-state code that touches the
	 * EMAC registers.
	 */
	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep (do not strip) VLAN tags only while no vlan group is
	 * registered and the firmware allows it.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast by setting every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address to one bit of the 256-bit
			 * (8 x 32) filter: CRC byte selects register
			 * (top 3 bits) and bit position (low 5 bits).
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More secondary unicast addresses than hardware match slots:
	 * fall back to promiscuous mode.
	 */
	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		list_for_each_entry(ha, &dev->uc_list, list) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, program the new rules, then re-enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3399
3400 static int __devinit
3401 check_fw_section(const struct firmware *fw,
3402                  const struct bnx2_fw_file_section *section,
3403                  u32 alignment, bool non_empty)
3404 {
3405         u32 offset = be32_to_cpu(section->offset);
3406         u32 len = be32_to_cpu(section->len);
3407
3408         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3409                 return -EINVAL;
3410         if ((non_empty && len == 0) || len > fw->size - offset ||
3411             len & (alignment - 1))
3412                 return -EINVAL;
3413         return 0;
3414 }
3415
3416 static int __devinit
3417 check_mips_fw_entry(const struct firmware *fw,
3418                     const struct bnx2_mips_fw_file_entry *entry)
3419 {
3420         if (check_fw_section(fw, &entry->text, 4, true) ||
3421             check_fw_section(fw, &entry->data, 4, false) ||
3422             check_fw_section(fw, &entry->rodata, 4, false))
3423                 return -EINVAL;
3424         return 0;
3425 }
3426
3427 static int __devinit
3428 bnx2_request_firmware(struct bnx2 *bp)
3429 {
3430         const char *mips_fw_file, *rv2p_fw_file;
3431         const struct bnx2_mips_fw_file *mips_fw;
3432         const struct bnx2_rv2p_fw_file *rv2p_fw;
3433         int rc;
3434
3435         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3436                 mips_fw_file = FW_MIPS_FILE_09;
3437                 rv2p_fw_file = FW_RV2P_FILE_09;
3438         } else {
3439                 mips_fw_file = FW_MIPS_FILE_06;
3440                 rv2p_fw_file = FW_RV2P_FILE_06;
3441         }
3442
3443         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3444         if (rc) {
3445                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3446                        mips_fw_file);
3447                 return rc;
3448         }
3449
3450         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3451         if (rc) {
3452                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3453                        rv2p_fw_file);
3454                 return rc;
3455         }
3456         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3457         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3458         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3459             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3460             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3461             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3462             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3463             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3464                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3465                        mips_fw_file);
3466                 return -EINVAL;
3467         }
3468         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3469             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3470             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3471                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3472                        rv2p_fw_file);
3473                 return -EINVAL;
3474         }
3475
3476         return 0;
3477 }
3478
3479 static u32
3480 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3481 {
3482         switch (idx) {
3483         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3484                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3485                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3486                 break;
3487         }
3488         return rv2p_code;
3489 }
3490
/* Download one RV2P firmware image into the given processor's
 * instruction RAM and apply the per-image fixup table, then reset the
 * processor (it is un-stalled later by chip init).
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own write command and address register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write one 64-bit instruction per iteration: stage the high
	 * and low 32-bit halves, then commit to instruction slot i / 8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-patch individual instructions from the 8-entry fixup
	 * table (e.g. the BD page size, which depends on PAGE_SIZE).
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			/* loc indexes 32-bit words; the instruction's
			 * high half is the word before loc.
			 */
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			/* Two words per instruction slot. */
			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3550
3551 static int
3552 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3553             const struct bnx2_mips_fw_file_entry *fw_entry)
3554 {
3555         u32 addr, len, file_offset;
3556         __be32 *data;
3557         u32 offset;
3558         u32 val;
3559
3560         /* Halt the CPU. */
3561         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3562         val |= cpu_reg->mode_value_halt;
3563         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3564         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3565
3566         /* Load the Text area. */
3567         addr = be32_to_cpu(fw_entry->text.addr);
3568         len = be32_to_cpu(fw_entry->text.len);
3569         file_offset = be32_to_cpu(fw_entry->text.offset);
3570         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3571
3572         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3573         if (len) {
3574                 int j;
3575
3576                 for (j = 0; j < (len / 4); j++, offset += 4)
3577                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3578         }
3579
3580         /* Load the Data area. */
3581         addr = be32_to_cpu(fw_entry->data.addr);
3582         len = be32_to_cpu(fw_entry->data.len);
3583         file_offset = be32_to_cpu(fw_entry->data.offset);
3584         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3585
3586         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3587         if (len) {
3588                 int j;
3589
3590                 for (j = 0; j < (len / 4); j++, offset += 4)
3591                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3592         }
3593
3594         /* Load the Read-Only area. */
3595         addr = be32_to_cpu(fw_entry->rodata.addr);
3596         len = be32_to_cpu(fw_entry->rodata.len);
3597         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3598         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3599
3600         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3601         if (len) {
3602                 int j;
3603
3604                 for (j = 0; j < (len / 4); j++, offset += 4)
3605                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3606         }
3607
3608         /* Clear the pre-fetch instruction. */
3609         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3610
3611         val = be32_to_cpu(fw_entry->start_addr);
3612         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3613
3614         /* Start the CPU. */
3615         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3616         val &= ~cpu_reg->mode_value_halt;
3617         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3618         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3619
3620         return 0;
3621 }
3622
3623 static int
3624 bnx2_init_cpus(struct bnx2 *bp)
3625 {
3626         const struct bnx2_mips_fw_file *mips_fw =
3627                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3628         const struct bnx2_rv2p_fw_file *rv2p_fw =
3629                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3630         int rc;
3631
3632         /* Initialize the RV2P processor. */
3633         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3634         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3635
3636         /* Initialize the RX Processor. */
3637         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3638         if (rc)
3639                 goto init_cpu_err;
3640
3641         /* Initialize the TX Processor. */
3642         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3643         if (rc)
3644                 goto init_cpu_err;
3645
3646         /* Initialize the TX Patch-up Processor. */
3647         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3648         if (rc)
3649                 goto init_cpu_err;
3650
3651         /* Initialize the Completion Processor. */
3652         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3653         if (rc)
3654                 goto init_cpu_err;
3655
3656         /* Initialize the Command Processor. */
3657         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3658
3659 init_cpu_err:
3660         return rc;
3661 }
3662
/* Transition the device between PCI power states.  Only D0 and D3hot
 * are supported; any other state returns -EINVAL.  The D3hot path
 * optionally arms Wake-on-LAN (magic packet / ACPI pattern match)
 * before cutting power.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Return to D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack wake-up events and drop magic-packet mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force low-speed autoneg on copper
			 * to reduce power while armed for wake-up;
			 * restore the user settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule: broadcast + multicast only. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (unless it told
		 * us WoL is not supported at all).
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot when WoL is armed;
		 * presumably a chip-revision workaround -- confirm
		 * against the errata before changing.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3800
3801 static int
3802 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3803 {
3804         u32 val;
3805         int j;
3806
3807         /* Request access to the flash interface. */
3808         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3809         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3810                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3811                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3812                         break;
3813
3814                 udelay(5);
3815         }
3816
3817         if (j >= NVRAM_TIMEOUT_COUNT)
3818                 return -EBUSY;
3819
3820         return 0;
3821 }
3822
3823 static int
3824 bnx2_release_nvram_lock(struct bnx2 *bp)
3825 {
3826         int j;
3827         u32 val;
3828
3829         /* Relinquish nvram interface. */
3830         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3831
3832         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3833                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3834                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3835                         break;
3836
3837                 udelay(5);
3838         }
3839
3840         if (j >= NVRAM_TIMEOUT_COUNT)
3841                 return -EBUSY;
3842
3843         return 0;
3844 }
3845
3846
3847 static int
3848 bnx2_enable_nvram_write(struct bnx2 *bp)
3849 {
3850         u32 val;
3851
3852         val = REG_RD(bp, BNX2_MISC_CFG);
3853         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3854
3855         if (bp->flash_info->flags & BNX2_NV_WREN) {
3856                 int j;
3857
3858                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3859                 REG_WR(bp, BNX2_NVM_COMMAND,
3860                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3861
3862                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3863                         udelay(5);
3864
3865                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3866                         if (val & BNX2_NVM_COMMAND_DONE)
3867                                 break;
3868                 }
3869
3870                 if (j >= NVRAM_TIMEOUT_COUNT)
3871                         return -EBUSY;
3872         }
3873         return 0;
3874 }
3875
3876 static void
3877 bnx2_disable_nvram_write(struct bnx2 *bp)
3878 {
3879         u32 val;
3880
3881         val = REG_RD(bp, BNX2_MISC_CFG);
3882         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3883 }
3884
3885
3886 static void
3887 bnx2_enable_nvram_access(struct bnx2 *bp)
3888 {
3889         u32 val;
3890
3891         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3892         /* Enable both bits, even on read. */
3893         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3894                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3895 }
3896
3897 static void
3898 bnx2_disable_nvram_access(struct bnx2 *bp)
3899 {
3900         u32 val;
3901
3902         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3903         /* Disable both bits, even after read. */
3904         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3905                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3906                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3907 }
3908
3909 static int
3910 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3911 {
3912         u32 cmd;
3913         int j;
3914
3915         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3916                 /* Buffered flash, no erase needed */
3917                 return 0;
3918
3919         /* Build an erase command */
3920         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3921               BNX2_NVM_COMMAND_DOIT;
3922
3923         /* Need to clear DONE bit separately. */
3924         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3925
3926         /* Address of the NVRAM to read from. */
3927         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3928
3929         /* Issue an erase command. */
3930         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3931
3932         /* Wait for completion. */
3933         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3934                 u32 val;
3935
3936                 udelay(5);
3937
3938                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3939                 if (val & BNX2_NVM_COMMAND_DONE)
3940                         break;
3941         }
3942
3943         if (j >= NVRAM_TIMEOUT_COUNT)
3944                 return -EBUSY;
3945
3946         return 0;
3947 }
3948
3949 static int
3950 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3951 {
3952         u32 cmd;
3953         int j;
3954
3955         /* Build the command word. */
3956         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3957
3958         /* Calculate an offset of a buffered flash, not needed for 5709. */
3959         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3960                 offset = ((offset / bp->flash_info->page_size) <<
3961                            bp->flash_info->page_bits) +
3962                           (offset % bp->flash_info->page_size);
3963         }
3964
3965         /* Need to clear DONE bit separately. */
3966         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3967
3968         /* Address of the NVRAM to read from. */
3969         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3970
3971         /* Issue a read command. */
3972         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3973
3974         /* Wait for completion. */
3975         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3976                 u32 val;
3977
3978                 udelay(5);
3979
3980                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3981                 if (val & BNX2_NVM_COMMAND_DONE) {
3982                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3983                         memcpy(ret_val, &v, 4);
3984                         break;
3985                 }
3986         }
3987         if (j >= NVRAM_TIMEOUT_COUNT)
3988                 return -EBUSY;
3989
3990         return 0;
3991 }
3992
3993
3994 static int
3995 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3996 {
3997         u32 cmd;
3998         __be32 val32;
3999         int j;
4000
4001         /* Build the command word. */
4002         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4003
4004         /* Calculate an offset of a buffered flash, not needed for 5709. */
4005         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4006                 offset = ((offset / bp->flash_info->page_size) <<
4007                           bp->flash_info->page_bits) +
4008                          (offset % bp->flash_info->page_size);
4009         }
4010
4011         /* Need to clear DONE bit separately. */
4012         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4013
4014         memcpy(&val32, val, 4);
4015
4016         /* Write the data. */
4017         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4018
4019         /* Address of the NVRAM to write to. */
4020         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4021
4022         /* Issue the write command. */
4023         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4024
4025         /* Wait for completion. */
4026         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4027                 udelay(5);
4028
4029                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4030                         break;
4031         }
4032         if (j >= NVRAM_TIMEOUT_COUNT)
4033                 return -EBUSY;
4034
4035         return 0;
4036 }
4037
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the flash interface if the strapping
 * has not been applied yet.  Also determines bp->flash_size.
 * Returns 0 on success, -ENODEV for an unrecognized part, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* The 5709 has a single fixed flash spec; skip strap matching. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match the part by its backup-strap bits in
			 * the already-programmed config1 value.
			 */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a matching table entry. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared memory; fall back to
	 * the table's total_size.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4120
4121 static int
4122 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4123                 int buf_size)
4124 {
4125         int rc = 0;
4126         u32 cmd_flags, offset32, len32, extra;
4127
4128         if (buf_size == 0)
4129                 return 0;
4130
4131         /* Request access to the flash interface. */
4132         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4133                 return rc;
4134
4135         /* Enable access to flash interface */
4136         bnx2_enable_nvram_access(bp);
4137
4138         len32 = buf_size;
4139         offset32 = offset;
4140         extra = 0;
4141
4142         cmd_flags = 0;
4143
4144         if (offset32 & 3) {
4145                 u8 buf[4];
4146                 u32 pre_len;
4147
4148                 offset32 &= ~3;
4149                 pre_len = 4 - (offset & 3);
4150
4151                 if (pre_len >= len32) {
4152                         pre_len = len32;
4153                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4154                                     BNX2_NVM_COMMAND_LAST;
4155                 }
4156                 else {
4157                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4158                 }
4159
4160                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4161
4162                 if (rc)
4163                         return rc;
4164
4165                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4166
4167                 offset32 += 4;
4168                 ret_buf += pre_len;
4169                 len32 -= pre_len;
4170         }
4171         if (len32 & 3) {
4172                 extra = 4 - (len32 & 3);
4173                 len32 = (len32 + 4) & ~3;
4174         }
4175
4176         if (len32 == 4) {
4177                 u8 buf[4];
4178
4179                 if (cmd_flags)
4180                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4181                 else
4182                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4183                                     BNX2_NVM_COMMAND_LAST;
4184
4185                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4186
4187                 memcpy(ret_buf, buf, 4 - extra);
4188         }
4189         else if (len32 > 0) {
4190                 u8 buf[4];
4191
4192                 /* Read the first word. */
4193                 if (cmd_flags)
4194                         cmd_flags = 0;
4195                 else
4196                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4197
4198                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4199
4200                 /* Advance to the next dword. */
4201                 offset32 += 4;
4202                 ret_buf += 4;
4203                 len32 -= 4;
4204
4205                 while (len32 > 4 && rc == 0) {
4206                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4207
4208                         /* Advance to the next dword. */
4209                         offset32 += 4;
4210                         ret_buf += 4;
4211                         len32 -= 4;
4212                 }
4213
4214                 if (rc)
4215                         return rc;
4216
4217                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4218                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4219
4220                 memcpy(ret_buf, buf, 4 - extra);
4221         }
4222
4223         /* Disable access to flash interface */
4224         bnx2_disable_nvram_access(bp);
4225
4226         bnx2_release_nvram_lock(bp);
4227
4228         return rc;
4229 }
4230
/* Write buf_size bytes from data_buf to NVRAM at byte offset 'offset'.
 *
 * Unaligned starts/ends are handled by reading the surrounding dwords
 * first ('start'/'end') and building a dword-aligned shadow copy in
 * 'align_buf'.  For non-buffered flash parts the affected page is read
 * into 'flash_buffer', erased, and rewritten in full (old data around
 * the new bytes is preserved).
 *
 * Returns 0 on success or a negative error; frees all temporary
 * buffers on every exit path via the nvram_write_end label.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: align down and pre-read the first dword so its
	 * leading bytes can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: pad to a dword and pre-read the last dword so its
	 * trailing bytes can be preserved. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned shadow copy: preserved head + caller data +
	 * preserved tail. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized (264-byte) scratch buffer
	 * for the read/erase/rewrite cycle. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Write one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				/* NOTE(review): this and the error gotos
				 * below leave the NVRAM lock held and flash
				 * access enabled on failure -- confirm
				 * whether cleanup should release them. */
				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both frees are safe on every path. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4410
4411 static void
4412 bnx2_init_fw_cap(struct bnx2 *bp)
4413 {
4414         u32 val, sig = 0;
4415
4416         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4417         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4418
4419         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4420                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4421
4422         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4423         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4424                 return;
4425
4426         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4427                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4428                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4429         }
4430
4431         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4432             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4433                 u32 link;
4434
4435                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4436
4437                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4438                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4439                         bp->phy_port = PORT_FIBRE;
4440                 else
4441                         bp->phy_port = PORT_TP;
4442
4443                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4444                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4445         }
4446
4447         if (netif_running(bp->dev) && sig)
4448                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4449 }
4450
/* Program the PCI GRC windows for MSI-X: switch to separate-window
 * mode, then point window 2 at the MSI-X table and window 3 at the
 * MSI-X PBA. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4459
/* Soft-reset the chip, coordinating with the bootcode firmware via
 * fw_sync mailboxes before and after the reset.  'reset_code' is the
 * BNX2_DRV_MSG_CODE_* reason passed to the firmware.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * if byte swapping comes up wrong after reset, or a firmware-sync /
 * rbuf-fixup error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register instead of
		 * PCI config space. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; a remote-PHY port change after
	 * reset requires re-selecting the default remote link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset wiped the MSI-X window setup; redo it if in use. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4565
/* Bring the chip from post-reset state to fully configured: DMA
 * configuration, context memory, CPU firmware, NVRAM detection, MAC
 * address, MTU, host coalescing, per-vector status blocks, and the
 * receive filter.  Finishes with a WAIT2 firmware handshake and enables
 * the default engine blocks.
 *
 * Returns 0 on success or an error from context/CPU init or the final
 * firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Build the DMA config: swap modes plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* 5706 (except A0) on PCI needs ping-pong DMA mode. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the PCI-X relaxed-ordering enable bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load and start the on-chip CPU firmware. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel-bypass mailbox window covers all kernel context IDs. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* The RBUF thresholds assume at least a standard 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Give the chip the DMA addresses of the status and stats blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing parameters (interrupt-mode value in the high
	 * halfword, polling value in the low halfword). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 workaround: timer modes are unusable, stats only. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Program the per-vector status block configs (vector 0 was
	 * covered by the registers above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4786
4787 static void
4788 bnx2_clear_ring_states(struct bnx2 *bp)
4789 {
4790         struct bnx2_napi *bnapi;
4791         struct bnx2_tx_ring_info *txr;
4792         struct bnx2_rx_ring_info *rxr;
4793         int i;
4794
4795         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4796                 bnapi = &bp->bnx2_napi[i];
4797                 txr = &bnapi->tx_ring;
4798                 rxr = &bnapi->rx_ring;
4799
4800                 txr->tx_cons = 0;
4801                 txr->hw_tx_cons = 0;
4802                 rxr->rx_prod_bseq = 0;
4803                 rxr->rx_prod = 0;
4804                 rxr->rx_cons = 0;
4805                 rxr->rx_pg_prod = 0;
4806                 rxr->rx_pg_cons = 0;
4807         }
4808 }
4809
4810 static void
4811 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4812 {
4813         u32 val, offset0, offset1, offset2, offset3;
4814         u32 cid_addr = GET_CID_ADDR(cid);
4815
4816         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4817                 offset0 = BNX2_L2CTX_TYPE_XI;
4818                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4819                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4820                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4821         } else {
4822                 offset0 = BNX2_L2CTX_TYPE;
4823                 offset1 = BNX2_L2CTX_CMD_TYPE;
4824                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4825                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4826         }
4827         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4828         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4829
4830         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4831         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4832
4833         val = (u64) txr->tx_desc_mapping >> 32;
4834         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4835
4836         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4837         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4838 }
4839
4840 static void
4841 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4842 {
4843         struct tx_bd *txbd;
4844         u32 cid = TX_CID;
4845         struct bnx2_napi *bnapi;
4846         struct bnx2_tx_ring_info *txr;
4847
4848         bnapi = &bp->bnx2_napi[ring_num];
4849         txr = &bnapi->tx_ring;
4850
4851         if (ring_num == 0)
4852                 cid = TX_CID;
4853         else
4854                 cid = TX_TSS_CID + ring_num - 1;
4855
4856         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4857
4858         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4859
4860         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4861         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4862
4863         txr->tx_prod = 0;
4864         txr->tx_prod_bseq = 0;
4865
4866         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4867         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4868
4869         bnx2_init_tx_context(bp, cid, txr);
4870 }
4871
4872 static void
4873 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4874                      int num_rings)
4875 {
4876         int i;
4877         struct rx_bd *rxbd;
4878
4879         for (i = 0; i < num_rings; i++) {
4880                 int j;
4881
4882                 rxbd = &rx_ring[i][0];
4883                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4884                         rxbd->rx_bd_len = buf_size;
4885                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4886                 }
4887                 if (i == (num_rings - 1))
4888                         j = 0;
4889                 else
4890                         j = i + 1;
4891                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4892                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4893         }
4894 }
4895
/* Initialize RX ring 'ring_num': set up the BD chains (normal and,
 * when enabled, page rings), program the RX context with the ring
 * base addresses, pre-fill the rings with buffers, and write the
 * initial producer indices to the doorbell mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional (RSS) rings use
	 * consecutive CIDs starting at RX_RSS_CID. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* Arm the L2_5 mailbox queue entry on 5709. */
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring (page buffer size 0). */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo/page ring enabled: set up its BD chain and tell
		 * the context about the page buffer sizes and addresses. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Normal RX ring base address, high then low dword. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal RX ring with skbs. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Doorbell mailbox addresses for producer index/bseq updates. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4975
/* Initialize every TX and RX ring and, when multiple rings are in use,
 * program the TSS/RSS steering configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)built. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table in 4-byte chunks; each
		 * entry selects one of num_rx_rings - 1 rings round
		 * robin, and a completed 32-bit word is written to the
		 * RXP scratch area on every fourth entry.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		/* Enable RSS hashing for all IPv4 and IPv6 types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5020
5021 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5022 {
5023         u32 max, num_rings = 1;
5024
5025         while (ring_size > MAX_RX_DESC_CNT) {
5026                 ring_size -= MAX_RX_DESC_CNT;
5027                 num_rings++;
5028         }
5029         /* round to next power of 2 */
5030         max = max_size;
5031         while ((max & num_rings) == 0)
5032                 max >>= 1;
5033
5034         if (num_rings != max)
5035                 max <<= 1;
5036
5037         return max;
5038 }
5039
/* Derive RX buffer sizing and ring geometry from the current MTU.
 * Sets the buffer use/alloc sizes, the copybreak threshold, the
 * jumbo (page ring) sizing when a frame no longer fits in one page,
 * and the number of BD pages for both the normal and page rings.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Full skb memory footprint: aligned data plus shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Frame won't fit in one page: use a small header buffer
		 * plus page-ring pages for the rest of the frame.
		 * NOTE(review): mtu - 40 presumably accounts for header
		 * bytes kept in the header buffer — confirm against the
		 * RX path before relying on it.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5078
5079 static void
5080 bnx2_free_tx_skbs(struct bnx2 *bp)
5081 {
5082         int i;
5083
5084         for (i = 0; i < bp->num_tx_rings; i++) {
5085                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5086                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5087                 int j;
5088
5089                 if (txr->tx_buf_ring == NULL)
5090                         continue;
5091
5092                 for (j = 0; j < TX_DESC_CNT; ) {
5093                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5094                         struct sk_buff *skb = tx_buf->skb;
5095
5096                         if (skb == NULL) {
5097                                 j++;
5098                                 continue;
5099                         }
5100
5101                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5102
5103                         tx_buf->skb = NULL;
5104
5105                         j += skb_shinfo(skb)->nr_frags + 1;
5106                         dev_kfree_skb(skb);
5107                 }
5108         }
5109 }
5110
5111 static void
5112 bnx2_free_rx_skbs(struct bnx2 *bp)
5113 {
5114         int i;
5115
5116         for (i = 0; i < bp->num_rx_rings; i++) {
5117                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5118                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5119                 int j;
5120
5121                 if (rxr->rx_buf_ring == NULL)
5122                         return;
5123
5124                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5125                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5126                         struct sk_buff *skb = rx_buf->skb;
5127
5128                         if (skb == NULL)
5129                                 continue;
5130
5131                         pci_unmap_single(bp->pdev,
5132                                          pci_unmap_addr(rx_buf, mapping),
5133                                          bp->rx_buf_use_size,
5134                                          PCI_DMA_FROMDEVICE);
5135
5136                         rx_buf->skb = NULL;
5137
5138                         dev_kfree_skb(skb);
5139                 }
5140                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5141                         bnx2_free_rx_page(bp, rxr, j);
5142         }
5143 }
5144
/* Free all buffers held by the TX and RX rings; used when the chip is
 * reset or shut down.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5151
5152 static int
5153 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5154 {
5155         int rc;
5156
5157         rc = bnx2_reset_chip(bp, reset_code);
5158         bnx2_free_skbs(bp);
5159         if (rc)
5160                 return rc;
5161
5162         if ((rc = bnx2_init_chip(bp)) != 0)
5163                 return rc;
5164
5165         bnx2_init_all_rings(bp);
5166         return 0;
5167 }
5168
/* Bring the NIC fully up: reset and re-initialize the chip, then set
 * up the PHY and link state under the phy_lock.  reset_phy is passed
 * through to bnx2_init_phy().  Returns 0 or a negative errno from the
 * reset step.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* With a remote PHY, pick up any link event that occurred while
	 * the chip was down.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5185
5186 static int
5187 bnx2_shutdown_chip(struct bnx2 *bp)
5188 {
5189         u32 reset_code;
5190
5191         if (bp->flags & BNX2_FLAG_NO_WOL)
5192                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5193         else if (bp->wol)
5194                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5195         else
5196                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5197
5198         return bnx2_reset_chip(bp, reset_code);
5199 }
5200
/* Self-test the chip's registers.  For each entry in reg_tbl, write 0
 * and then all-ones, checking that read/write bits (rw_mask) take the
 * written value and read-only bits (ro_mask) are unaffected.  The
 * original register value is restored in all cases.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  Returns 0 on success
 * or -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset: register offset; rw_mask: bits that must be writable;
	 * ro_mask: bits that must keep their original value.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: rw bits must read back 0, ro bits
		 * must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back 1, ro bits
		 * must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5371
5372 static int
5373 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5374 {
5375         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5376                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5377         int i;
5378
5379         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5380                 u32 offset;
5381
5382                 for (offset = 0; offset < size; offset += 4) {
5383
5384                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5385
5386                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5387                                 test_pattern[i]) {
5388                                 return -ENODEV;
5389                         }
5390                 }
5391         }
5392         return 0;
5393 }
5394
5395 static int
5396 bnx2_test_memory(struct bnx2 *bp)
5397 {
5398         int ret = 0;
5399         int i;
5400         static struct mem_entry {
5401                 u32   offset;
5402                 u32   len;
5403         } mem_tbl_5706[] = {
5404                 { 0x60000,  0x4000 },
5405                 { 0xa0000,  0x3000 },
5406                 { 0xe0000,  0x4000 },
5407                 { 0x120000, 0x4000 },
5408                 { 0x1a0000, 0x4000 },
5409                 { 0x160000, 0x4000 },
5410                 { 0xffffffff, 0    },
5411         },
5412         mem_tbl_5709[] = {
5413                 { 0x60000,  0x4000 },
5414                 { 0xa0000,  0x3000 },
5415                 { 0xe0000,  0x4000 },
5416                 { 0x120000, 0x4000 },
5417                 { 0x1a0000, 0x4000 },
5418                 { 0xffffffff, 0    },
5419         };
5420         struct mem_entry *mem_tbl;
5421
5422         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5423                 mem_tbl = mem_tbl_5709;
5424         else
5425                 mem_tbl = mem_tbl_5706;
5426
5427         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5428                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5429                         mem_tbl[i].len)) != 0) {
5430                         return ret;
5431                 }
5432         }
5433
5434         return ret;
5435 }
5436
5437 #define BNX2_MAC_LOOPBACK       0
5438 #define BNX2_PHY_LOOPBACK       1
5439
/* Send one self-addressed packet through the MAC or PHY loopback path
 * and verify it is received intact on ring 0.  Completion is forced
 * with COAL_NOW_WO_INT and polled after short delays — no interrupts
 * are used.  Returns 0 on success, -EINVAL for an unknown mode,
 * -ENOMEM/-EIO on setup failure, or -ENODEV if the packet does not
 * come back correctly.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback is not applicable with a remote PHY;
		 * report success so the self-test does not flag it.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC is our own address; payload bytes from
	 * offset 14 carry a recognizable (i & 0xff) pattern.
	 */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_head;

	/* Force a status block update so the starting RX consumer
	 * index is current.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single TX BD describing the whole packet. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the doorbell to start transmission. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The packet must have been fully transmitted... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts packets must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip-written l2_fhdr precedes the packet data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte by byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5570
5571 #define BNX2_MAC_LOOPBACK_FAILED        1
5572 #define BNX2_PHY_LOOPBACK_FAILED        2
5573 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5574                                          BNX2_PHY_LOOPBACK_FAILED)
5575
5576 static int
5577 bnx2_test_loopback(struct bnx2 *bp)
5578 {
5579         int rc = 0;
5580
5581         if (!netif_running(bp->dev))
5582                 return BNX2_LOOPBACK_FAILED;
5583
5584         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5585         spin_lock_bh(&bp->phy_lock);
5586         bnx2_init_phy(bp, 1);
5587         spin_unlock_bh(&bp->phy_lock);
5588         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5589                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5590         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5591                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5592         return rc;
5593 }
5594
5595 #define NVRAM_SIZE 0x200
5596 #define CRC32_RESIDUAL 0xdebb20e3
5597
/* Self-test the NVRAM: check the magic number at offset 0, then verify
 * the CRC of the two 256-byte blocks starting at offset 0x100.
 * Returns 0 if valid, -ENODEV on bad magic or checksum, or the error
 * from bnx2_nvram_read().
 */
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	/* A CRC computed over a block that includes its own stored CRC
	 * yields a constant residual when the block is intact.
	 */
	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
5632
5633 static int
5634 bnx2_test_link(struct bnx2 *bp)
5635 {
5636         u32 bmsr;
5637
5638         if (!netif_running(bp->dev))
5639                 return -ENODEV;
5640
5641         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5642                 if (bp->link_up)
5643                         return 0;
5644                 return -ENODEV;
5645         }
5646         spin_lock_bh(&bp->phy_lock);
5647         bnx2_enable_bmsr1(bp);
5648         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5649         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5650         bnx2_disable_bmsr1(bp);
5651         spin_unlock_bh(&bp->phy_lock);
5652
5653         if (bmsr & BMSR_LSTATUS) {
5654                 return 0;
5655         }
5656         return -ENODEV;
5657 }
5658
5659 static int
5660 bnx2_test_intr(struct bnx2 *bp)
5661 {
5662         int i;
5663         u16 status_idx;
5664
5665         if (!netif_running(bp->dev))
5666                 return -ENODEV;
5667
5668         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5669
5670         /* This register is not touched during run-time. */
5671         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5672         REG_RD(bp, BNX2_HC_COMMAND);
5673
5674         for (i = 0; i < 10; i++) {
5675                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5676                         status_idx) {
5677
5678                         break;
5679                 }
5680
5681                 msleep_interruptible(10);
5682         }
5683         if (i < 10)
5684                 return 0;
5685
5686         return -ENODEV;
5687 }
5688
/* Determining link for parallel detection.
 *
 * Probes 5706S SerDes PHY shadow/expansion registers: signal detect
 * must be present, the AN debug register must show neither NOSYNC nor
 * invalid RUDI, and no CONFIG ordered sets may be arriving.  Returns
 * 1 if a non-autoneg (parallel-detect) link partner appears present,
 * 0 otherwise or when parallel detection is disabled.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* NOTE(review): double reads below presumably clear latched
	 * status so the second read is current — confirm against the
	 * PHY datasheet.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5720
/* Timer-context link maintenance for the 5706 SerDes PHY.
 *
 * While autoneg has not produced a link, falls back to forced
 * 1G/full via parallel detection; once linked by parallel detect,
 * re-enables autoneg if the partner starts autonegotiating.  Also
 * monitors sync loss and forces link state updates accordingly.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Give autoneg more timer ticks before intervening. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg enabled but no link: if the partner
			 * looks like a forced-mode link, switch to
			 * forced 1G full duplex (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): regs 0x17/0x15 are vendor-specific;
		 * bit 0x20 presumably indicates the partner is now
		 * autonegotiating — confirm against the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner switched to autoneg: rejoin it. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG; see
		 * bnx2_5706_serdes_has_link() for the same pattern.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reported up but PHY lost sync: force the
			 * link down once, then let bnx2_set_link()
			 * re-evaluate on subsequent ticks.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5782
/* Timer-context link maintenance for the 5708 SerDes PHY.
 *
 * On 2.5G-capable ports with autoneg enabled but no link, alternates
 * between forced 2.5G and autoneg (with a 2-tick grace period) until
 * link is established.  No-op with a remote PHY.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out the autoneg grace period. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		/* Toggle between autoneg and forced 2.5G each pass. */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
5815
/* Per-device periodic timer callback (bp->timer).  Sends the driver
 * heartbeat to the bootcode, refreshes the firmware rx-drop counter,
 * applies the 5708 statistics workaround, runs the SerDes link state
 * machines, and rearms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts are being held off (e.g. a reset is in progress):
         * skip this tick's work but keep the timer running. */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Plain MSI (without one-shot mode) can miss interrupts; run
         * the recovery check only in that configuration. */
        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        /* Firmware-level rx drops live in an indirect register; mirror
         * them into the stats block for bnx2_get_stats(). */
        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5851
5852 static int
5853 bnx2_request_irq(struct bnx2 *bp)
5854 {
5855         unsigned long flags;
5856         struct bnx2_irq *irq;
5857         int rc = 0, i;
5858
5859         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5860                 flags = 0;
5861         else
5862                 flags = IRQF_SHARED;
5863
5864         for (i = 0; i < bp->irq_nvecs; i++) {
5865                 irq = &bp->irq_tbl[i];
5866                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5867                                  &bp->bnx2_napi[i]);
5868                 if (rc)
5869                         break;
5870                 irq->requested = 1;
5871         }
5872         return rc;
5873 }
5874
5875 static void
5876 bnx2_free_irq(struct bnx2 *bp)
5877 {
5878         struct bnx2_irq *irq;
5879         int i;
5880
5881         for (i = 0; i < bp->irq_nvecs; i++) {
5882                 irq = &bp->irq_tbl[i];
5883                 if (irq->requested)
5884                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5885                 irq->requested = 0;
5886         }
5887         if (bp->flags & BNX2_FLAG_USING_MSI)
5888                 pci_disable_msi(bp->pdev);
5889         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5890                 pci_disable_msix(bp->pdev);
5891
5892         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5893 }
5894
/* Try to switch the device to MSI-X: program the MSI-X table/PBA GRC
 * windows, ask the PCI core for BNX2_MAX_MSIX_VEC vectors, and on
 * success record the vectors in bp->irq_tbl with one-shot handlers.
 * On any failure this returns silently and the caller keeps the
 * default INTx (or later MSI) setup.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
        struct net_device *dev = bp->dev;
        const int len = sizeof(bp->irq_tbl[0].name);

        /* Map the MSI-X table and PBA into GRC windows before asking
         * the PCI core to enable the vectors. */
        bnx2_setup_msix_tbl(bp);
        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                msix_ent[i].entry = i;
                msix_ent[i].vector = 0;
        }

        /* All-or-nothing request for the full vector count. */
        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;

        /* Only msix_vecs vectors will be used (irq_nvecs), but name
         * and record every hardware vector. */
        bp->irq_nvecs = msix_vecs;
        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                bp->irq_tbl[i].vector = msix_ent[i].vector;
                snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
                bp->irq_tbl[i].handler = bnx2_msi_1shot;
        }
}
5925
/* Choose the interrupt mode for this open: MSI-X (up to cpus+1
 * vectors, capped at RX_MAX_RINGS), MSI, or legacy INTx; dis_msi
 * forces the INTx fallback.  Also derives the tx/rx ring counts from
 * the number of vectors actually obtained.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        int cpus = num_online_cpus();
        int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

        /* Default: a single legacy INTx vector. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        /* MSI-X only pays off with more than one CPU. */
        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
                bnx2_enable_msix(bp, msix_vecs);

        /* Fall back to plain MSI if MSI-X was not enabled. */
        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 supports one-shot MSI. */
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }

        /* Keep the tx ring count a power of two. */
        bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
        bp->dev->real_num_tx_queues = bp->num_tx_rings;

        bp->num_rx_rings = bp->irq_nvecs;
}
5959
/* Called with rtnl_lock */
/* net_device open: power the chip to D0, pick an interrupt mode,
 * allocate memory, request IRQs, initialize the NIC, and start the tx
 * queues.  If plain MSI fails the interrupt self-test, fall back to
 * INTx and reinitialize.  Returns 0 or a negative errno; on failure
 * everything acquired so far is released.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* dis_msi=1 forces the INTx setup, then the NIC
                         * is re-initialized for the new vector layout. */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                /* Timer was armed above; stop it before
                                 * unwinding. */
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

        netif_tx_start_all_queues(dev);

        return 0;

open_err:
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        return rc;
}
6036
/* Deferred reset handler (bp->reset_task), scheduled from
 * bnx2_tx_timeout().  Runs in process context so the netif can be
 * quiesced before the chip is reinitialized.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        if (!netif_running(bp->dev))
                return;

        bnx2_netif_stop(bp);

        bnx2_init_nic(bp, 1);

        /* NOTE(review): intr_sem is left at 1 here; presumably
         * bnx2_netif_start() brings it back down -- that code is not
         * visible in this section. */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
}
6052
6053 static void
6054 bnx2_tx_timeout(struct net_device *dev)
6055 {
6056         struct bnx2 *bp = netdev_priv(dev);
6057
6058         /* This allows the netif to be shutdown gracefully before resetting */
6059         schedule_work(&bp->reset_task);
6060 }
6061
6062 #ifdef BCM_VLAN
/* Called with rtnl_lock */
/* 8021q hook: record the new vlan group and reprogram rx filtering,
 * with the interface quiesced around the update.  If the firmware can
 * keep VLAN tags, notify it of the change as well.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

        bnx2_netif_start(bp);
}
6078 #endif
6079
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* ndo_start_xmit: DMA-map the skb and post one tx buffer descriptor
 * per fragment on the ring selected by the skb's queue mapping.  The
 * first BD carries the checksum-offload, VLAN-tag and TSO parameters
 * in tx_bd_vlan_tag_flags/mss.  Returns NETDEV_TX_OK (the skb is now
 * owned by the driver, or freed on DMA-map failure) or NETDEV_TX_BUSY
 * if the ring unexpectedly has no room.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;
        struct skb_shared_info *sp;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* Need one BD for the head plus one per fragment.  The queue
         * should have been stopped before it got this full. */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

#ifdef BCM_VLAN
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                /* VLAN tag rides in the upper 16 bits of the flags word. */
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
#endif
        /* TSO: encode header-length / tcp-offset info for the chip. */
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* IPv6 TSO: extra extension-header offset (from
                         * the end of a basic ipv6hdr) is split across
                         * flag and mss bit-fields. */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* IPv4 TSO: header length (IP options + TCP
                         * options) in 32-bit words, bits 8.. */
                        iph = ip_hdr(skb);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* Map head and all fragments in one call; on failure the skb
         * is dropped (counted as OK per ndo_start_xmit contract). */
        if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        sp = skb_shinfo(skb);
        mapping = sp->dma_head;

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;

        txbd = &txr->tx_desc_ring[ring_prod];

        /* First BD: head data, START flag, offload parameters. */
        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;
        tx_buf->nr_frags = last_frag;
        tx_buf->is_gso = skb_is_gso(skb);

        /* One BD per page fragment, using the mappings created by
         * skb_dma_map() above. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = sp->dma_maps[i];

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Last BD written gets the END flag. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Ring the doorbell: new producer index and byte sequence. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;

        /* Stop the queue when nearly full; re-wake immediately if
         * bnx2_tx_int() freed enough room in the meantime. */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
}
6220
/* Called with rtnl_lock */
/* net_device close: stop deferred work, interrupts, NAPI and the
 * timer, shut the chip down, then release IRQs and memory before
 * putting the device into D3hot.  Teardown order mirrors (reverses)
 * bnx2_open().
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure a pending reset_task cannot run during teardown. */
        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6241
/* Fold a 64-bit hardware statistics counter, stored as a pair of
 * 32-bit words named <ctr>_hi and <ctr>_lo, into an unsigned long.
 * On 32-bit hosts only the low word fits, so the high word is dropped.
 * Both expansions are fully parenthesized so GET_NET_STATS(...) can be
 * used safely inside larger expressions without precedence surprises.
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6254
/* ndo_get_stats: translate the hardware/firmware statistics block
 * into the generic net_device_stats counters.  64-bit HW counters are
 * folded via GET_NET_STATS (truncated to the low word on 32-bit
 * hosts).  If the stats block is not allocated, the (zeroed) existing
 * counters are returned unchanged.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &dev->stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        /* rx_errors is the sum of the specific rx error classes above. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* 5706 and 5708 A0 do not report carrier sense errors reliably;
         * report zero on those chips. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Include drops counted by the firmware (mirrored into the
         * stats block by bnx2_timer()). */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
6330
6331 /* All ethtool functions called with rtnl_lock */
6332
/* ethtool get_settings: report supported/advertised modes, the active
 * port type, autoneg state, and the current speed/duplex (unknown, -1,
 * when the carrier is down).  phy_lock guards the fields that the link
 * state machine updates.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* A remote PHY (managed by firmware) can be either media type. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                /* Link down: speed/duplex unknown. */
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6391
/* ethtool set_settings: validate and apply the requested port type,
 * autoneg/advertising mask or forced speed/duplex.  Works on local
 * copies so nothing is committed if validation fails (-EINVAL).  If
 * the device is up, the PHY is reprogrammed immediately; otherwise the
 * values are stored for the next open.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching port types is only possible with a remote PHY. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G requires a capable fibre PHY. */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise everything the port
                         * type supports. */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced mode: fibre allows only 1G/2.5G full duplex;
                 * copper disallows 1G/2.5G forced. */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6486
6487 static void
6488 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6489 {
6490         struct bnx2 *bp = netdev_priv(dev);
6491
6492         strcpy(info->driver, DRV_MODULE_NAME);
6493         strcpy(info->version, DRV_MODULE_VERSION);
6494         strcpy(info->bus_info, pci_name(bp->pdev));
6495         strcpy(info->fw_version, bp->fw_version);
6496 }
6497
6498 #define BNX2_REGDUMP_LEN                (32 * 1024)
6499
/* ethtool: size in bytes of the register dump that bnx2_get_regs()
 * produces (fixed at BNX2_REGDUMP_LEN).
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6505
6506 static void
6507 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6508 {
6509         u32 *p = _p, i, offset;
6510         u8 *orig_p = _p;
6511         struct bnx2 *bp = netdev_priv(dev);
6512         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6513                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6514                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6515                                  0x1040, 0x1048, 0x1080, 0x10a4,
6516                                  0x1400, 0x1490, 0x1498, 0x14f0,
6517                                  0x1500, 0x155c, 0x1580, 0x15dc,
6518                                  0x1600, 0x1658, 0x1680, 0x16d8,
6519                                  0x1800, 0x1820, 0x1840, 0x1854,
6520                                  0x1880, 0x1894, 0x1900, 0x1984,
6521                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6522                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6523                                  0x2000, 0x2030, 0x23c0, 0x2400,
6524                                  0x2800, 0x2820, 0x2830, 0x2850,
6525                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6526                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6527                                  0x4080, 0x4090, 0x43c0, 0x4458,
6528                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6529                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6530                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6531                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6532                                  0x6800, 0x6848, 0x684c, 0x6860,
6533                                  0x6888, 0x6910, 0x8000 };
6534
6535         regs->version = 0;
6536
6537         memset(p, 0, BNX2_REGDUMP_LEN);
6538
6539         if (!netif_running(bp->dev))
6540                 return;
6541
6542         i = 0;
6543         offset = reg_boundaries[0];
6544         p += offset;
6545         while (offset < BNX2_REGDUMP_LEN) {
6546                 *p++ = REG_RD(bp, offset);
6547                 offset += 4;
6548                 if (offset == reg_boundaries[i + 1]) {
6549                         offset = reg_boundaries[i + 2];
6550                         p = (u32 *) (orig_p + offset);
6551                         i += 2;
6552                 }
6553         }
6554 }
6555
6556 static void
6557 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6558 {
6559         struct bnx2 *bp = netdev_priv(dev);
6560
6561         if (bp->flags & BNX2_FLAG_NO_WOL) {
6562                 wol->supported = 0;
6563                 wol->wolopts = 0;
6564         }
6565         else {
6566                 wol->supported = WAKE_MAGIC;
6567                 if (bp->wol)
6568                         wol->wolopts = WAKE_MAGIC;
6569                 else
6570                         wol->wolopts = 0;
6571         }
6572         memset(&wol->sopass, 0, sizeof(wol->sopass));
6573 }
6574
6575 static int
6576 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6577 {
6578         struct bnx2 *bp = netdev_priv(dev);
6579
6580         if (wol->wolopts & ~WAKE_MAGIC)
6581                 return -EINVAL;
6582
6583         if (wol->wolopts & WAKE_MAGIC) {
6584                 if (bp->flags & BNX2_FLAG_NO_WOL)
6585                         return -EINVAL;
6586
6587                 bp->wol = 1;
6588         }
6589         else {
6590                 bp->wol = 0;
6591         }
6592         return 0;
6593 }
6594
/* ethtool nway_reset: restart autonegotiation.  Only valid when the
 * device is up (-EAGAIN) and autoneg is enabled (-EINVAL).  A remote
 * PHY is simply reprogrammed via the firmware; for local SerDes the
 * link is first forced down so the partner notices the restart.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock around the sleep; loopback holds the
                 * link down in the meantime. */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the SerDes autoneg timeout state machine. */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6640
6641 static int
6642 bnx2_get_eeprom_len(struct net_device *dev)
6643 {
6644         struct bnx2 *bp = netdev_priv(dev);
6645
6646         if (bp->flash_info == NULL)
6647                 return 0;
6648
6649         return (int) bp->flash_size;
6650 }
6651
6652 static int
6653 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6654                 u8 *eebuf)
6655 {
6656         struct bnx2 *bp = netdev_priv(dev);
6657         int rc;
6658
6659         if (!netif_running(dev))
6660                 return -EAGAIN;
6661
6662         /* parameters already validated in ethtool_get_eeprom */
6663
6664         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6665
6666         return rc;
6667 }
6668
6669 static int
6670 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6671                 u8 *eebuf)
6672 {
6673         struct bnx2 *bp = netdev_priv(dev);
6674         int rc;
6675
6676         if (!netif_running(dev))
6677                 return -EAGAIN;
6678
6679         /* parameters already validated in ethtool_set_eeprom */
6680
6681         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6682
6683         return rc;
6684 }
6685
6686 static int
6687 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6688 {
6689         struct bnx2 *bp = netdev_priv(dev);
6690
6691         memset(coal, 0, sizeof(struct ethtool_coalesce));
6692
6693         coal->rx_coalesce_usecs = bp->rx_ticks;
6694         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6695         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6696         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6697
6698         coal->tx_coalesce_usecs = bp->tx_ticks;
6699         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6700         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6701         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6702
6703         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6704
6705         return 0;
6706 }
6707
6708 static int
6709 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6710 {
6711         struct bnx2 *bp = netdev_priv(dev);
6712
6713         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6714         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6715
6716         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6717         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6718
6719         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6720         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6721
6722         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6723         if (bp->rx_quick_cons_trip_int > 0xff)
6724                 bp->rx_quick_cons_trip_int = 0xff;
6725
6726         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6727         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6728
6729         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6730         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6731
6732         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6733         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6734
6735         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6736         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6737                 0xff;
6738
6739         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6740         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6741                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6742                         bp->stats_ticks = USEC_PER_SEC;
6743         }
6744         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6745                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6746         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6747
6748         if (netif_running(bp->dev)) {
6749                 bnx2_netif_stop(bp);
6750                 bnx2_init_nic(bp, 0);
6751                 bnx2_netif_start(bp);
6752         }
6753
6754         return 0;
6755 }
6756
6757 static void
6758 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6759 {
6760         struct bnx2 *bp = netdev_priv(dev);
6761
6762         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6763         ering->rx_mini_max_pending = 0;
6764         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6765
6766         ering->rx_pending = bp->rx_ring_size;
6767         ering->rx_mini_pending = 0;
6768         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6769
6770         ering->tx_max_pending = MAX_TX_DESC_CNT;
6771         ering->tx_pending = bp->tx_ring_size;
6772 }
6773
/* Resize the RX and TX rings.  A running device is reset, its buffers
 * and DMA memory freed, and then reallocated and reinitialized with
 * the new sizes.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			/* NOTE(review): on allocation failure the device is
			 * left stopped with its rings freed; confirm callers
			 * (or a follow-up fix) recover from this state.
			 */
			return rc;
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}
	return 0;
}
6798
6799 static int
6800 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6801 {
6802         struct bnx2 *bp = netdev_priv(dev);
6803         int rc;
6804
6805         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6806                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6807                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6808
6809                 return -EINVAL;
6810         }
6811         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6812         return rc;
6813 }
6814
6815 static void
6816 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6817 {
6818         struct bnx2 *bp = netdev_priv(dev);
6819
6820         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6821         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6822         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6823 }
6824
6825 static int
6826 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6827 {
6828         struct bnx2 *bp = netdev_priv(dev);
6829
6830         bp->req_flow_ctrl = 0;
6831         if (epause->rx_pause)
6832                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6833         if (epause->tx_pause)
6834                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6835
6836         if (epause->autoneg) {
6837                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6838         }
6839         else {
6840                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6841         }
6842
6843         if (netif_running(dev)) {
6844                 spin_lock_bh(&bp->phy_lock);
6845                 bnx2_setup_phy(bp, bp->phy_port);
6846                 spin_unlock_bh(&bp->phy_lock);
6847         }
6848
6849         return 0;
6850 }
6851
6852 static u32
6853 bnx2_get_rx_csum(struct net_device *dev)
6854 {
6855         struct bnx2 *bp = netdev_priv(dev);
6856
6857         return bp->rx_csum;
6858 }
6859
6860 static int
6861 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6862 {
6863         struct bnx2 *bp = netdev_priv(dev);
6864
6865         bp->rx_csum = data;
6866         return 0;
6867 }
6868
6869 static int
6870 bnx2_set_tso(struct net_device *dev, u32 data)
6871 {
6872         struct bnx2 *bp = netdev_priv(dev);
6873
6874         if (data) {
6875                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6876                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6877                         dev->features |= NETIF_F_TSO6;
6878         } else
6879                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6880                                    NETIF_F_TSO_ECN);
6881         return 0;
6882 }
6883
#define BNX2_NUM_STATS 46

/* ethtool statistics names, reported for ETH_SS_STATS by
 * bnx2_get_strings().  The order must match bnx2_stats_offset_arr[]
 * and the per-chip length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6936
/* Word offset of a counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Offset (in 32-bit words) of each counter named in
 * bnx2_stats_str_arr[].  For 64-bit counters the entry points at the
 * _hi word; bnx2_get_ethtool_stats() reads the following word as the
 * low half.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6987
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Counter width in bytes (8 = 64-bit, 4 = 32-bit, 0 = skip) for each
 * statistic on 5706 A0-A2 and 5708 A0 chips.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6998
/* Counter widths for all other chips; only stat_IfHCInBadOctets
 * (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
7006
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; the order matches the buf[] slots filled
 * in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7019
7020 static int
7021 bnx2_get_sset_count(struct net_device *dev, int sset)
7022 {
7023         switch (sset) {
7024         case ETH_SS_TEST:
7025                 return BNX2_NUM_TESTS;
7026         case ETH_SS_STATS:
7027                 return BNX2_NUM_STATS;
7028         default:
7029                 return -EOPNOTSUPP;
7030         }
7031 }
7032
/* ethtool self_test: run the diagnostics listed in bnx2_tests_str_arr.
 * buf[i] is nonzero when test i failed; etest->flags accumulates
 * ETH_TEST_FL_FAILED.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the chip is fully powered before touching it. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive use of the hardware. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] keeps the loopback test's nonzero failure code. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the diagnostic reset. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests run without taking the interface down. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return to low power when the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7091
7092 static void
7093 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7094 {
7095         switch (stringset) {
7096         case ETH_SS_STATS:
7097                 memcpy(buf, bnx2_stats_str_arr,
7098                         sizeof(bnx2_stats_str_arr));
7099                 break;
7100         case ETH_SS_TEST:
7101                 memcpy(buf, bnx2_tests_str_arr,
7102                         sizeof(bnx2_tests_str_arr));
7103                 break;
7104         }
7105 }
7106
7107 static void
7108 bnx2_get_ethtool_stats(struct net_device *dev,
7109                 struct ethtool_stats *stats, u64 *buf)
7110 {
7111         struct bnx2 *bp = netdev_priv(dev);
7112         int i;
7113         u32 *hw_stats = (u32 *) bp->stats_blk;
7114         u8 *stats_len_arr = NULL;
7115
7116         if (hw_stats == NULL) {
7117                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7118                 return;
7119         }
7120
7121         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7122             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7123             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7124             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7125                 stats_len_arr = bnx2_5706_stats_len_arr;
7126         else
7127                 stats_len_arr = bnx2_5708_stats_len_arr;
7128
7129         for (i = 0; i < BNX2_NUM_STATS; i++) {
7130                 if (stats_len_arr[i] == 0) {
7131                         /* skip this counter */
7132                         buf[i] = 0;
7133                         continue;
7134                 }
7135                 if (stats_len_arr[i] == 4) {
7136                         /* 4-byte counter */
7137                         buf[i] = (u64)
7138                                 *(hw_stats + bnx2_stats_offset_arr[i]);
7139                         continue;
7140                 }
7141                 /* 8-byte counter */
7142                 buf[i] = (((u64) *(hw_stats +
7143                                         bnx2_stats_offset_arr[i])) << 32) +
7144                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7145         }
7146 }
7147
7148 static int
7149 bnx2_phys_id(struct net_device *dev, u32 data)
7150 {
7151         struct bnx2 *bp = netdev_priv(dev);
7152         int i;
7153         u32 save;
7154
7155         bnx2_set_power_state(bp, PCI_D0);
7156
7157         if (data == 0)
7158                 data = 2;
7159
7160         save = REG_RD(bp, BNX2_MISC_CFG);
7161         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7162
7163         for (i = 0; i < (data * 2); i++) {
7164                 if ((i % 2) == 0) {
7165                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7166                 }
7167                 else {
7168                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7169                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7170                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7171                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7172                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7173                                 BNX2_EMAC_LED_TRAFFIC);
7174                 }
7175                 msleep_interruptible(500);
7176                 if (signal_pending(current))
7177                         break;
7178         }
7179         REG_WR(bp, BNX2_EMAC_LED, 0);
7180         REG_WR(bp, BNX2_MISC_CFG, save);
7181
7182         if (!netif_running(dev))
7183                 bnx2_set_power_state(bp, PCI_D3hot);
7184
7185         return 0;
7186 }
7187
7188 static int
7189 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7190 {
7191         struct bnx2 *bp = netdev_priv(dev);
7192
7193         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7194                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7195         else
7196                 return (ethtool_op_set_tx_csum(dev, data));
7197 }
7198
/* ethtool entry points for the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7229
/* Called with rtnl_lock */
/* ndo_do_ioctl: MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG).  Direct MDIO access is refused when firmware owns the
 * PHY (remote-PHY mode) and when the device is down.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		/* NOTE(review): val_out is copied even when the read
		 * failed; confirm bnx2_read_phy always writes mii_regval.
		 */
		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7283
7284 /* Called with rtnl_lock */
7285 static int
7286 bnx2_change_mac_addr(struct net_device *dev, void *p)
7287 {
7288         struct sockaddr *addr = p;
7289         struct bnx2 *bp = netdev_priv(dev);
7290
7291         if (!is_valid_ether_addr(addr->sa_data))
7292                 return -EINVAL;
7293
7294         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7295         if (netif_running(dev))
7296                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7297
7298         return 0;
7299 }
7300
7301 /* Called with rtnl_lock */
7302 static int
7303 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7304 {
7305         struct bnx2 *bp = netdev_priv(dev);
7306
7307         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7308                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7309                 return -EINVAL;
7310
7311         dev->mtu = new_mtu;
7312         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7313 }
7314
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller hook: service every interrupt vector by hand,
 * with its IRQ line disabled, so netpoll can run without interrupt
 * delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		disable_irq(bp->irq_tbl[i].vector);
		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
		enable_irq(bp->irq_tbl[i].vector);
	}
}
#endif
7329
/* Determine whether a 5709 port uses copper or SerDes media from the
 * bond ID and strap bits in the dual-media control register, setting
 * BNX2_PHY_FLAG_SERDES for SerDes ports.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* A definite bond ID settles the question immediately. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Otherwise decode the strap, honoring a software override. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values that mean SerDes differ per PCI function. */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7367
/* Probe the PCI/PCI-X bus mode, speed and width from the chip's
 * misc-status and clock-control registers, recording them in
 * bp->flags and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* In PCI-X mode the detected clock encodes the speed. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: 66 MHz when M66EN is set, else 33. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7419
7420 static int __devinit
7421 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7422 {
7423         struct bnx2 *bp;
7424         unsigned long mem_len;
7425         int rc, i, j;
7426         u32 reg;
7427         u64 dma_mask, persist_dma_mask;
7428
7429         SET_NETDEV_DEV(dev, &pdev->dev);
7430         bp = netdev_priv(dev);
7431
7432         bp->flags = 0;
7433         bp->phy_flags = 0;
7434
7435         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7436         rc = pci_enable_device(pdev);
7437         if (rc) {
7438                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7439                 goto err_out;
7440         }
7441
7442         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7443                 dev_err(&pdev->dev,
7444                         "Cannot find PCI device base address, aborting.\n");
7445                 rc = -ENODEV;
7446                 goto err_out_disable;
7447         }
7448
7449         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7450         if (rc) {
7451                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7452                 goto err_out_disable;
7453         }
7454
7455         pci_set_master(pdev);
7456         pci_save_state(pdev);
7457
7458         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7459         if (bp->pm_cap == 0) {
7460                 dev_err(&pdev->dev,
7461                         "Cannot find power management capability, aborting.\n");
7462                 rc = -EIO;
7463                 goto err_out_release;
7464         }
7465
7466         bp->dev = dev;
7467         bp->pdev = pdev;
7468
7469         spin_lock_init(&bp->phy_lock);
7470         spin_lock_init(&bp->indirect_lock);
7471         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7472
7473         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7474         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7475         dev->mem_end = dev->mem_start + mem_len;
7476         dev->irq = pdev->irq;
7477
7478         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7479
7480         if (!bp->regview) {
7481                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7482                 rc = -ENOMEM;
7483                 goto err_out_release;
7484         }
7485
7486         /* Configure byte swap and enable write to the reg_window registers.
7487          * Rely on CPU to do target byte swapping on big endian systems
7488          * The chip's target access swapping will not swap all accesses
7489          */
7490         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7491                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7492                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7493
7494         bnx2_set_power_state(bp, PCI_D0);
7495
7496         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7497
7498         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7499                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7500                         dev_err(&pdev->dev,
7501                                 "Cannot find PCIE capability, aborting.\n");
7502                         rc = -EIO;
7503                         goto err_out_unmap;
7504                 }
7505                 bp->flags |= BNX2_FLAG_PCIE;
7506                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7507                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7508         } else {
7509                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7510                 if (bp->pcix_cap == 0) {
7511                         dev_err(&pdev->dev,
7512                                 "Cannot find PCIX capability, aborting.\n");
7513                         rc = -EIO;
7514                         goto err_out_unmap;
7515                 }
7516         }
7517
7518         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7519                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7520                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7521         }
7522
7523         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7524                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7525                         bp->flags |= BNX2_FLAG_MSI_CAP;
7526         }
7527
7528         /* 5708 cannot support DMA addresses > 40-bit.  */
7529         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7530                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7531         else
7532                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7533
7534         /* Configure DMA attributes. */
7535         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7536                 dev->features |= NETIF_F_HIGHDMA;
7537                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7538                 if (rc) {
7539                         dev_err(&pdev->dev,
7540                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7541                         goto err_out_unmap;
7542                 }
7543         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7544                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7545                 goto err_out_unmap;
7546         }
7547
7548         if (!(bp->flags & BNX2_FLAG_PCIE))
7549                 bnx2_get_pci_speed(bp);
7550
7551         /* 5706A0 may falsely detect SERR and PERR. */
7552         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7553                 reg = REG_RD(bp, PCI_COMMAND);
7554                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7555                 REG_WR(bp, PCI_COMMAND, reg);
7556         }
7557         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7558                 !(bp->flags & BNX2_FLAG_PCIX)) {
7559
7560                 dev_err(&pdev->dev,
7561                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7562                 goto err_out_unmap;
7563         }
7564
7565         bnx2_init_nvram(bp);
7566
7567         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7568
7569         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7570             BNX2_SHM_HDR_SIGNATURE_SIG) {
7571                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7572
7573                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7574         } else
7575                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7576
7577         /* Get the permanent MAC address.  First we need to make sure the
7578          * firmware is actually running.
7579          */
7580         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7581
7582         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7583             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7584                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7585                 rc = -ENODEV;
7586                 goto err_out_unmap;
7587         }
7588
7589         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7590         for (i = 0, j = 0; i < 3; i++) {
7591                 u8 num, k, skip0;
7592
7593                 num = (u8) (reg >> (24 - (i * 8)));
7594                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7595                         if (num >= k || !skip0 || k == 1) {
7596                                 bp->fw_version[j++] = (num / k) + '0';
7597                                 skip0 = 0;
7598                         }
7599                 }
7600                 if (i != 2)
7601                         bp->fw_version[j++] = '.';
7602         }
7603         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7604         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7605                 bp->wol = 1;
7606
7607         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7608                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7609
7610                 for (i = 0; i < 30; i++) {
7611                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7612                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7613                                 break;
7614                         msleep(10);
7615                 }
7616         }
7617         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7618         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7619         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7620             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7621                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7622
7623                 bp->fw_version[j++] = ' ';
7624                 for (i = 0; i < 3; i++) {
7625                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7626                         reg = swab32(reg);
7627                         memcpy(&bp->fw_version[j], &reg, 4);
7628                         j += 4;
7629                 }
7630         }
7631
7632         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7633         bp->mac_addr[0] = (u8) (reg >> 8);
7634         bp->mac_addr[1] = (u8) reg;
7635
7636         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7637         bp->mac_addr[2] = (u8) (reg >> 24);
7638         bp->mac_addr[3] = (u8) (reg >> 16);
7639         bp->mac_addr[4] = (u8) (reg >> 8);
7640         bp->mac_addr[5] = (u8) reg;
7641
7642         bp->tx_ring_size = MAX_TX_DESC_CNT;
7643         bnx2_set_rx_ring_size(bp, 255);
7644
7645         bp->rx_csum = 1;
7646
7647         bp->tx_quick_cons_trip_int = 20;
7648         bp->tx_quick_cons_trip = 20;
7649         bp->tx_ticks_int = 80;
7650         bp->tx_ticks = 80;
7651
7652         bp->rx_quick_cons_trip_int = 6;
7653         bp->rx_quick_cons_trip = 6;
7654         bp->rx_ticks_int = 18;
7655         bp->rx_ticks = 18;
7656
7657         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7658
7659         bp->current_interval = BNX2_TIMER_INTERVAL;
7660
7661         bp->phy_addr = 1;
7662
7663         /* Disable WOL support if we are running on a SERDES chip. */
7664         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7665                 bnx2_get_5709_media(bp);
7666         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7667                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7668
7669         bp->phy_port = PORT_TP;
7670         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7671                 bp->phy_port = PORT_FIBRE;
7672                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7673                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7674                         bp->flags |= BNX2_FLAG_NO_WOL;
7675                         bp->wol = 0;
7676                 }
7677                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7678                         /* Don't do parallel detect on this board because of
7679                          * some board problems.  The link will not go down
7680                          * if we do parallel detect.
7681                          */
7682                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7683                             pdev->subsystem_device == 0x310c)
7684                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7685                 } else {
7686                         bp->phy_addr = 2;
7687                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7688                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7689                 }
7690         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7691                    CHIP_NUM(bp) == CHIP_NUM_5708)
7692                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7693         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7694                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7695                   CHIP_REV(bp) == CHIP_REV_Bx))
7696                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7697
7698         bnx2_init_fw_cap(bp);
7699
7700         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7701             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7702             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7703             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7704                 bp->flags |= BNX2_FLAG_NO_WOL;
7705                 bp->wol = 0;
7706         }
7707
7708         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7709                 bp->tx_quick_cons_trip_int =
7710                         bp->tx_quick_cons_trip;
7711                 bp->tx_ticks_int = bp->tx_ticks;
7712                 bp->rx_quick_cons_trip_int =
7713                         bp->rx_quick_cons_trip;
7714                 bp->rx_ticks_int = bp->rx_ticks;
7715                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7716                 bp->com_ticks_int = bp->com_ticks;
7717                 bp->cmd_ticks_int = bp->cmd_ticks;
7718         }
7719
7720         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7721          *
7722          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7723          * with byte enables disabled on the unused 32-bit word.  This is legal
7724          * but causes problems on the AMD 8132 which will eventually stop
7725          * responding after a while.
7726          *
7727          * AMD believes this incompatibility is unique to the 5706, and
7728          * prefers to locally disable MSI rather than globally disabling it.
7729          */
7730         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7731                 struct pci_dev *amd_8132 = NULL;
7732
7733                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7734                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7735                                                   amd_8132))) {
7736
7737                         if (amd_8132->revision >= 0x10 &&
7738                             amd_8132->revision <= 0x13) {
7739                                 disable_msi = 1;
7740                                 pci_dev_put(amd_8132);
7741                                 break;
7742                         }
7743                 }
7744         }
7745
7746         bnx2_set_default_link(bp);
7747         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7748
7749         init_timer(&bp->timer);
7750         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7751         bp->timer.data = (unsigned long) bp;
7752         bp->timer.function = bnx2_timer;
7753
7754         return 0;
7755
7756 err_out_unmap:
7757         if (bp->regview) {
7758                 iounmap(bp->regview);
7759                 bp->regview = NULL;
7760         }
7761
7762 err_out_release:
7763         pci_release_regions(pdev);
7764
7765 err_out_disable:
7766         pci_disable_device(pdev);
7767         pci_set_drvdata(pdev, NULL);
7768
7769 err_out:
7770         return rc;
7771 }
7772
7773 static char * __devinit
7774 bnx2_bus_string(struct bnx2 *bp, char *str)
7775 {
7776         char *s = str;
7777
7778         if (bp->flags & BNX2_FLAG_PCIE) {
7779                 s += sprintf(s, "PCI Express");
7780         } else {
7781                 s += sprintf(s, "PCI");
7782                 if (bp->flags & BNX2_FLAG_PCIX)
7783                         s += sprintf(s, "-X");
7784                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7785                         s += sprintf(s, " 32-bit");
7786                 else
7787                         s += sprintf(s, " 64-bit");
7788                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7789         }
7790         return str;
7791 }
7792
7793 static void __devinit
7794 bnx2_init_napi(struct bnx2 *bp)
7795 {
7796         int i;
7797
7798         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7799                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7800                 int (*poll)(struct napi_struct *, int);
7801
7802                 if (i == 0)
7803                         poll = bnx2_poll;
7804                 else
7805                         poll = bnx2_poll_msix;
7806
7807                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7808                 bnapi->bp = bp;
7809         }
7810 }
7811
/* net_device callback table: wires the network stack's entry points to
 * this driver's handlers.  The VLAN and netpoll hooks are included only
 * when the corresponding kernel support is configured.
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};
7830
/* PCI probe entry point: allocate the net_device, run the board-level
 * init (bnx2_init_board), load the firmware images, advertise feature
 * flags appropriate to the chip, and register with the network stack.
 * Returns 0 on success or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner once, on first probe only. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	/* bnx2_init_board undoes its own PCI setup on failure, so a
	 * plain free_netdev is the only cleanup needed here. */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* MAC address was read from shared memory by bnx2_init_board. */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Unwind everything bnx2_init_board and bnx2_request_firmware
	 * set up; release_firmware/iounmap are guarded since either
	 * may not have happened yet. */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
7913
/* PCI remove entry point: quiesce deferred work, detach from the
 * network stack, and release firmware, MMIO mapping and PCI resources
 * in reverse order of acquisition.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the reset_task (and any other queued work) has
	 * finished before tearing the device down. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7937
/* Legacy PCI suspend handler: save config space, and if the interface
 * is up, stop traffic, shut the chip down and enter the requested
 * low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Quiesce deferred work before touching the hardware. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7961
/* Legacy PCI resume handler: restore config space, and if the
 * interface was up at suspend time, power the chip back up,
 * re-initialize the NIC and restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init (reset_phy = 1) since the chip lost state. */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7978
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8008
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Re-initialize the hardware only if the interface was up
	 * when the error occurred; traffic restarts in io_resume. */
	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
8038
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
8058
/* PCI AER recovery callbacks: detect -> slot reset -> resume. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8064
/* PCI driver registration: probe/remove, legacy power management and
 * error-recovery handlers for all supported NetXtreme II devices. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8074
/* Module entry point: register the PCI driver with the core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8079
/* Module exit point: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8084
8085 module_init(bnx2_init);
8086 module_exit(bnx2_cleanup);
8087
8088
8089