[BNX2]: Refine tx coalescing setup.
[linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Scratch buffer size used when handling firmware images. */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "    /* prefix for kernel log messages */
#define DRV_MODULE_VERSION      "1.7.2"
#define DRV_MODULE_RELDATE      "January 21, 2008"

/* Convert a relative timeout in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board identifiers; stored in the driver_data field of bnx2_pci_tbl
 * entries and used to index board_info[] for the printable name.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
/* Printable board names, indexed by board_t, above.  The order of the
 * entries must match the enum exactly.
 */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI ID table.  HP OEM boards are matched first via their subsystem
 * vendor/device IDs; the PCI_ANY_ID wildcard entries for the same
 * device IDs must come after them so the more specific match wins.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* Supported NVRAM parts.  Each entry carries five hardware setup words
 * (presumably strapping value plus NVRAM config/command words — the
 * exact register layout is defined in bnx2.h), followed by the access
 * flags, page geometry, byte address mask, total size in bytes and a
 * printable name.  A total size of 0 marks an "expansion" placeholder
 * entry whose size is discovered at probe time.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* 5709 chips use a single fixed NVRAM layout instead of the strap-based
 * detection table above, so only one flash_spec is needed.
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bnapi->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Write a device register indirectly through the PCI config window.
 * The indirect_lock serializes the address/data register pair against
 * concurrent indirect accesses.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
293 static int
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
295 {
296         u32 val1;
297         int i, ret;
298
299         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
300                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
302
303                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305
306                 udelay(40);
307         }
308
309         val1 = (bp->phy_addr << 21) | (reg << 16) |
310                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311                 BNX2_EMAC_MDIO_COMM_START_BUSY;
312         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
313
314         for (i = 0; i < 50; i++) {
315                 udelay(10);
316
317                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
319                         udelay(5);
320
321                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
323
324                         break;
325                 }
326         }
327
328         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
329                 *val = 0x0;
330                 ret = -EBUSY;
331         }
332         else {
333                 *val = val1;
334                 ret = 0;
335         }
336
337         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
338                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340
341                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343
344                 udelay(40);
345         }
346
347         return ret;
348 }
349
350 static int
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
352 {
353         u32 val1;
354         int i, ret;
355
356         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
357                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
359
360                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362
363                 udelay(40);
364         }
365
366         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
370
371         for (i = 0; i < 50; i++) {
372                 udelay(10);
373
374                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
376                         udelay(5);
377                         break;
378                 }
379         }
380
381         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
382                 ret = -EBUSY;
383         else
384                 ret = 0;
385
386         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
387                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
389
390                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
392
393                 udelay(40);
394         }
395
396         return ret;
397 }
398
399 static void
400 bnx2_disable_int(struct bnx2 *bp)
401 {
402         int i;
403         struct bnx2_napi *bnapi;
404
405         for (i = 0; i < bp->irq_nvecs; i++) {
406                 bnapi = &bp->bnx2_napi[i];
407                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
408                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
409         }
410         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
411 }
412
/* Re-enable interrupts on every vector.  For each vector the current
 * status index is acknowledged twice: first with MASK_INT still set,
 * then without it, which unmasks the vector.  Finally COAL_NOW forces
 * the host coalescing block to generate an interrupt immediately so no
 * events that arrived while masked are lost.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                /* Ack up to last_status_idx with the vector still masked. */
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                /* Second write without MASK_INT unmasks the vector. */
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        /* Kick the coalescing block so pending events fire right away. */
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
433
434 static void
435 bnx2_disable_int_sync(struct bnx2 *bp)
436 {
437         int i;
438
439         atomic_inc(&bp->intr_sem);
440         bnx2_disable_int(bp);
441         for (i = 0; i < bp->irq_nvecs; i++)
442                 synchronize_irq(bp->irq_tbl[i].vector);
443 }
444
445 static void
446 bnx2_napi_disable(struct bnx2 *bp)
447 {
448         int i;
449
450         for (i = 0; i < bp->irq_nvecs; i++)
451                 napi_disable(&bp->bnx2_napi[i].napi);
452 }
453
454 static void
455 bnx2_napi_enable(struct bnx2 *bp)
456 {
457         int i;
458
459         for (i = 0; i < bp->irq_nvecs; i++)
460                 napi_enable(&bp->bnx2_napi[i].napi);
461 }
462
463 static void
464 bnx2_netif_stop(struct bnx2 *bp)
465 {
466         bnx2_disable_int_sync(bp);
467         if (netif_running(bp->dev)) {
468                 bnx2_napi_disable(bp);
469                 netif_tx_disable(bp->dev);
470                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
471         }
472 }
473
474 static void
475 bnx2_netif_start(struct bnx2 *bp)
476 {
477         if (atomic_dec_and_test(&bp->intr_sem)) {
478                 if (netif_running(bp->dev)) {
479                         netif_wake_queue(bp->dev);
480                         bnx2_napi_enable(bp);
481                         bnx2_enable_int(bp);
482                 }
483         }
484 }
485
486 static void
487 bnx2_free_mem(struct bnx2 *bp)
488 {
489         int i;
490
491         for (i = 0; i < bp->ctx_pages; i++) {
492                 if (bp->ctx_blk[i]) {
493                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
494                                             bp->ctx_blk[i],
495                                             bp->ctx_blk_mapping[i]);
496                         bp->ctx_blk[i] = NULL;
497                 }
498         }
499         if (bp->status_blk) {
500                 pci_free_consistent(bp->pdev, bp->status_stats_size,
501                                     bp->status_blk, bp->status_blk_mapping);
502                 bp->status_blk = NULL;
503                 bp->stats_blk = NULL;
504         }
505         if (bp->tx_desc_ring) {
506                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507                                     bp->tx_desc_ring, bp->tx_desc_mapping);
508                 bp->tx_desc_ring = NULL;
509         }
510         kfree(bp->tx_buf_ring);
511         bp->tx_buf_ring = NULL;
512         for (i = 0; i < bp->rx_max_ring; i++) {
513                 if (bp->rx_desc_ring[i])
514                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
515                                             bp->rx_desc_ring[i],
516                                             bp->rx_desc_mapping[i]);
517                 bp->rx_desc_ring[i] = NULL;
518         }
519         vfree(bp->rx_buf_ring);
520         bp->rx_buf_ring = NULL;
521         for (i = 0; i < bp->rx_max_pg_ring; i++) {
522                 if (bp->rx_pg_desc_ring[i])
523                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524                                             bp->rx_pg_desc_ring[i],
525                                             bp->rx_pg_desc_mapping[i]);
526                 bp->rx_pg_desc_ring[i] = NULL;
527         }
528         if (bp->rx_pg_ring)
529                 vfree(bp->rx_pg_ring);
530         bp->rx_pg_ring = NULL;
531 }
532
/* Allocate all descriptor rings, their software shadows, the combined
 * status/statistics block and (on 5709) host context memory.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything allocated
 * so far is released through bnx2_free_mem() before returning.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        /* Software shadow of the tx ring. */
        bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* rx shadow can span many pages, so use vmalloc. */
        bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Page ring shadow is only needed when rx_pg_ring_size != 0. */
        if (bp->rx_pg_ring_size) {
                bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                         bp->rx_max_pg_ring);
                if (bp->rx_pg_ring == NULL)
                        goto alloc_mem_err;

                memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                       bp->rx_max_pg_ring);
        }

        for (i = 0; i < bp->rx_max_pg_ring; i++) {
                bp->rx_pg_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_pg_desc_mapping[i]);
                if (bp->rx_pg_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* One aligned status block slice per MSI-X HW vector. */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        bp->bnx2_napi[0].status_blk = bp->status_blk;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

                        /* Point each extra vector at its slice of the
                         * status block area.
                         */
                        bnapi->status_blk_msix = (void *)
                                ((unsigned long) bp->status_blk +
                                 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        /* Vector number goes in bits 31-24 of INT_ACK_CMD. */
                        bnapi->int_num = i << 24;
                }
        }

        /* The statistics block follows the status block(s). */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 0x2000 bytes of host context, in page-sized chunks. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
631
/* Report the current link state (speed/duplex/autoneg) to the bootcode
 * via the shared-memory LINK_STATUS mailbox.  Skipped entirely when the
 * PHY is managed remotely, since the firmware owns the link then.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode the negotiated speed/duplex. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* Read BMSR twice: some status bits are latched,
                         * so the second read reflects current state.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
690
691 static char *
692 bnx2_xceiver_str(struct bnx2 *bp)
693 {
694         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
696                  "Copper"));
697 }
698
699 static void
700 bnx2_report_link(struct bnx2 *bp)
701 {
702         if (bp->link_up) {
703                 netif_carrier_on(bp->dev);
704                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
705                        bnx2_xceiver_str(bp));
706
707                 printk("%d Mbps ", bp->line_speed);
708
709                 if (bp->duplex == DUPLEX_FULL)
710                         printk("full duplex");
711                 else
712                         printk("half duplex");
713
714                 if (bp->flow_ctrl) {
715                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
716                                 printk(", receive ");
717                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
718                                         printk("& transmit ");
719                         }
720                         else {
721                                 printk(", transmit ");
722                         }
723                         printk("flow control ON");
724                 }
725                 printk("\n");
726         }
727         else {
728                 netif_carrier_off(bp->dev);
729                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
730                        bnx2_xceiver_str(bp));
731         }
732
733         bnx2_report_fw_link(bp);
734 }
735
/* Resolve the pause (flow control) configuration into bp->flow_ctrl
 * from the local and link-partner advertisements, per IEEE 802.3
 * Annex 28B.  Flow control is only meaningful in full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* If either speed or flow-control autoneg is off, use the
         * administratively requested setting instead of negotiating.
         */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* 5708 SerDes reports the resolved pause state directly in a
         * status register, no advertisement comparison needed.
         */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        /* SerDes uses 1000Base-X pause bits; translate them to the
         * copper ADVERTISE_PAUSE_* layout so one resolution path below
         * handles both media types.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                /* Both sides symmetric-capable. */
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                /* Partner can send pause only: we rx. */
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        /* We can send pause only: tx direction. */
                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
811
812 static int
813 bnx2_5709s_linkup(struct bnx2 *bp)
814 {
815         u32 val, speed;
816
817         bp->link_up = 1;
818
819         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
820         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
821         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
822
823         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
824                 bp->line_speed = bp->req_line_speed;
825                 bp->duplex = bp->req_duplex;
826                 return 0;
827         }
828         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
829         switch (speed) {
830                 case MII_BNX2_GP_TOP_AN_SPEED_10:
831                         bp->line_speed = SPEED_10;
832                         break;
833                 case MII_BNX2_GP_TOP_AN_SPEED_100:
834                         bp->line_speed = SPEED_100;
835                         break;
836                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
837                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
838                         bp->line_speed = SPEED_1000;
839                         break;
840                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
841                         bp->line_speed = SPEED_2500;
842                         break;
843         }
844         if (val & MII_BNX2_GP_TOP_AN_FD)
845                 bp->duplex = DUPLEX_FULL;
846         else
847                 bp->duplex = DUPLEX_HALF;
848         return 0;
849 }
850
851 static int
852 bnx2_5708s_linkup(struct bnx2 *bp)
853 {
854         u32 val;
855
856         bp->link_up = 1;
857         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859                 case BCM5708S_1000X_STAT1_SPEED_10:
860                         bp->line_speed = SPEED_10;
861                         break;
862                 case BCM5708S_1000X_STAT1_SPEED_100:
863                         bp->line_speed = SPEED_100;
864                         break;
865                 case BCM5708S_1000X_STAT1_SPEED_1G:
866                         bp->line_speed = SPEED_1000;
867                         break;
868                 case BCM5708S_1000X_STAT1_SPEED_2G5:
869                         bp->line_speed = SPEED_2500;
870                         break;
871         }
872         if (val & BCM5708S_1000X_STAT1_FD)
873                 bp->duplex = DUPLEX_FULL;
874         else
875                 bp->duplex = DUPLEX_HALF;
876
877         return 0;
878 }
879
880 static int
881 bnx2_5706s_linkup(struct bnx2 *bp)
882 {
883         u32 bmcr, local_adv, remote_adv, common;
884
885         bp->link_up = 1;
886         bp->line_speed = SPEED_1000;
887
888         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889         if (bmcr & BMCR_FULLDPLX) {
890                 bp->duplex = DUPLEX_FULL;
891         }
892         else {
893                 bp->duplex = DUPLEX_HALF;
894         }
895
896         if (!(bmcr & BMCR_ANENABLE)) {
897                 return 0;
898         }
899
900         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
902
903         common = local_adv & remote_adv;
904         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
905
906                 if (common & ADVERTISE_1000XFULL) {
907                         bp->duplex = DUPLEX_FULL;
908                 }
909                 else {
910                         bp->duplex = DUPLEX_HALF;
911                 }
912         }
913
914         return 0;
915 }
916
917 static int
918 bnx2_copper_linkup(struct bnx2 *bp)
919 {
920         u32 bmcr;
921
922         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
923         if (bmcr & BMCR_ANENABLE) {
924                 u32 local_adv, remote_adv, common;
925
926                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
927                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
928
929                 common = local_adv & (remote_adv >> 2);
930                 if (common & ADVERTISE_1000FULL) {
931                         bp->line_speed = SPEED_1000;
932                         bp->duplex = DUPLEX_FULL;
933                 }
934                 else if (common & ADVERTISE_1000HALF) {
935                         bp->line_speed = SPEED_1000;
936                         bp->duplex = DUPLEX_HALF;
937                 }
938                 else {
939                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
940                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
941
942                         common = local_adv & remote_adv;
943                         if (common & ADVERTISE_100FULL) {
944                                 bp->line_speed = SPEED_100;
945                                 bp->duplex = DUPLEX_FULL;
946                         }
947                         else if (common & ADVERTISE_100HALF) {
948                                 bp->line_speed = SPEED_100;
949                                 bp->duplex = DUPLEX_HALF;
950                         }
951                         else if (common & ADVERTISE_10FULL) {
952                                 bp->line_speed = SPEED_10;
953                                 bp->duplex = DUPLEX_FULL;
954                         }
955                         else if (common & ADVERTISE_10HALF) {
956                                 bp->line_speed = SPEED_10;
957                                 bp->duplex = DUPLEX_HALF;
958                         }
959                         else {
960                                 bp->line_speed = 0;
961                                 bp->link_up = 0;
962                         }
963                 }
964         }
965         else {
966                 if (bmcr & BMCR_SPEED100) {
967                         bp->line_speed = SPEED_100;
968                 }
969                 else {
970                         bp->line_speed = SPEED_10;
971                 }
972                 if (bmcr & BMCR_FULLDPLX) {
973                         bp->duplex = DUPLEX_FULL;
974                 }
975                 else {
976                         bp->duplex = DUPLEX_HALF;
977                 }
978         }
979
980         return 0;
981 }
982
983 static int
984 bnx2_set_mac_link(struct bnx2 *bp)
985 {
986         u32 val;
987
988         REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
989         if (bp->link_up && (bp->line_speed == SPEED_1000) &&
990                 (bp->duplex == DUPLEX_HALF)) {
991                 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
992         }
993
994         /* Configure the EMAC mode register. */
995         val = REG_RD(bp, BNX2_EMAC_MODE);
996
997         val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
998                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
999                 BNX2_EMAC_MODE_25G_MODE);
1000
1001         if (bp->link_up) {
1002                 switch (bp->line_speed) {
1003                         case SPEED_10:
1004                                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1005                                         val |= BNX2_EMAC_MODE_PORT_MII_10M;
1006                                         break;
1007                                 }
1008                                 /* fall through */
1009                         case SPEED_100:
1010                                 val |= BNX2_EMAC_MODE_PORT_MII;
1011                                 break;
1012                         case SPEED_2500:
1013                                 val |= BNX2_EMAC_MODE_25G_MODE;
1014                                 /* fall through */
1015                         case SPEED_1000:
1016                                 val |= BNX2_EMAC_MODE_PORT_GMII;
1017                                 break;
1018                 }
1019         }
1020         else {
1021                 val |= BNX2_EMAC_MODE_PORT_GMII;
1022         }
1023
1024         /* Set the MAC to operate in the appropriate duplex mode. */
1025         if (bp->duplex == DUPLEX_HALF)
1026                 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1027         REG_WR(bp, BNX2_EMAC_MODE, val);
1028
1029         /* Enable/disable rx PAUSE. */
1030         bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1031
1032         if (bp->flow_ctrl & FLOW_CTRL_RX)
1033                 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1034         REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1035
1036         /* Enable/disable tx PAUSE. */
1037         val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1038         val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1039
1040         if (bp->flow_ctrl & FLOW_CTRL_TX)
1041                 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1042         REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1043
1044         /* Acknowledge the interrupt. */
1045         REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1046
1047         return 0;
1048 }
1049
1050 static void
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1052 {
1053         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1054             (CHIP_NUM(bp) == CHIP_NUM_5709))
1055                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056                                MII_BNX2_BLK_ADDR_GP_STATUS);
1057 }
1058
1059 static void
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1061 {
1062         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1063             (CHIP_NUM(bp) == CHIP_NUM_5709))
1064                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1066 }
1067
1068 static int
1069 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1070 {
1071         u32 up1;
1072         int ret = 1;
1073
1074         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1075                 return 0;
1076
1077         if (bp->autoneg & AUTONEG_SPEED)
1078                 bp->advertising |= ADVERTISED_2500baseX_Full;
1079
1080         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1081                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1082
1083         bnx2_read_phy(bp, bp->mii_up1, &up1);
1084         if (!(up1 & BCM5708S_UP1_2G5)) {
1085                 up1 |= BCM5708S_UP1_2G5;
1086                 bnx2_write_phy(bp, bp->mii_up1, up1);
1087                 ret = 0;
1088         }
1089
1090         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094         return ret;
1095 }
1096
1097 static int
1098 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1099 {
1100         u32 up1;
1101         int ret = 0;
1102
1103         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1104                 return 0;
1105
1106         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1107                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1108
1109         bnx2_read_phy(bp, bp->mii_up1, &up1);
1110         if (up1 & BCM5708S_UP1_2G5) {
1111                 up1 &= ~BCM5708S_UP1_2G5;
1112                 bnx2_write_phy(bp, bp->mii_up1, up1);
1113                 ret = 1;
1114         }
1115
1116         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1117                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1118                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1119
1120         return ret;
1121 }
1122
1123 static void
1124 bnx2_enable_forced_2g5(struct bnx2 *bp)
1125 {
1126         u32 bmcr;
1127
1128         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1129                 return;
1130
1131         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1132                 u32 val;
1133
1134                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1135                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1136                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1137                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1138                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1139                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1148         }
1149
1150         if (bp->autoneg & AUTONEG_SPEED) {
1151                 bmcr &= ~BMCR_ANENABLE;
1152                 if (bp->req_duplex == DUPLEX_FULL)
1153                         bmcr |= BMCR_FULLDPLX;
1154         }
1155         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1156 }
1157
1158 static void
1159 bnx2_disable_forced_2g5(struct bnx2 *bp)
1160 {
1161         u32 bmcr;
1162
1163         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1164                 return;
1165
1166         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1167                 u32 val;
1168
1169                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1170                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1171                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1172                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1173                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1174
1175                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1178
1179         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1180                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1181                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1182         }
1183
1184         if (bp->autoneg & AUTONEG_SPEED)
1185                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1186         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1187 }
1188
1189 static void
1190 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1191 {
1192         u32 val;
1193
1194         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1195         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1196         if (start)
1197                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1198         else
1199                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1200 }
1201
/* Re-evaluate the PHY link state and reprogram the MAC to match.
 * Always returns 0.  NOTE(review): appears to require bp->phy_lock
 * held (it touches PHY registers and link state) — confirm at callers.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                /* In loopback the link is up by definition. */
                bp->link_up = 1;
                return 0;
        }

        /* Firmware-managed remote PHY: link changes arrive via
         * firmware events, not by polling here.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        link_up = bp->link_up;

        /* BMSR latches link-down events; read it twice to get the
         * current status rather than a stale latched value.
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val;

                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        /* Undo a previously forced link-down. */
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                /* On 5706 SerDes, the EMAC link status overrides BMSR. */
                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Resolve negotiated speed/duplex per PHY type. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link down: drop any forced 2.5G so autoneg can run. */
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        /* Leave parallel-detect mode by re-enabling
                         * autonegotiation.
                         */
                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        /* Report only actual transitions. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1279
1280 static int
1281 bnx2_reset_phy(struct bnx2 *bp)
1282 {
1283         int i;
1284         u32 reg;
1285
1286         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1287
1288 #define PHY_RESET_MAX_WAIT 100
1289         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1290                 udelay(10);
1291
1292                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1293                 if (!(reg & BMCR_RESET)) {
1294                         udelay(20);
1295                         break;
1296                 }
1297         }
1298         if (i == PHY_RESET_MAX_WAIT) {
1299                 return -EBUSY;
1300         }
1301         return 0;
1302 }
1303
1304 static u32
1305 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1306 {
1307         u32 adv = 0;
1308
1309         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1310                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1311
1312                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1313                         adv = ADVERTISE_1000XPAUSE;
1314                 }
1315                 else {
1316                         adv = ADVERTISE_PAUSE_CAP;
1317                 }
1318         }
1319         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1320                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1321                         adv = ADVERTISE_1000XPSE_ASYM;
1322                 }
1323                 else {
1324                         adv = ADVERTISE_PAUSE_ASYM;
1325                 }
1326         }
1327         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1328                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1329                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1330                 }
1331                 else {
1332                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1333                 }
1334         }
1335         return adv;
1336 }
1337
1338 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1339
1340 static int
1341 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1342 {
1343         u32 speed_arg = 0, pause_adv;
1344
1345         pause_adv = bnx2_phy_get_pause_adv(bp);
1346
1347         if (bp->autoneg & AUTONEG_SPEED) {
1348                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1349                 if (bp->advertising & ADVERTISED_10baseT_Half)
1350                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1351                 if (bp->advertising & ADVERTISED_10baseT_Full)
1352                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1353                 if (bp->advertising & ADVERTISED_100baseT_Half)
1354                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1355                 if (bp->advertising & ADVERTISED_100baseT_Full)
1356                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1357                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1358                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1359                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1360                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1361         } else {
1362                 if (bp->req_line_speed == SPEED_2500)
1363                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1364                 else if (bp->req_line_speed == SPEED_1000)
1365                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1366                 else if (bp->req_line_speed == SPEED_100) {
1367                         if (bp->req_duplex == DUPLEX_FULL)
1368                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1369                         else
1370                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1371                 } else if (bp->req_line_speed == SPEED_10) {
1372                         if (bp->req_duplex == DUPLEX_FULL)
1373                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1374                         else
1375                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1376                 }
1377         }
1378
1379         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1380                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1381         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1382                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1383
1384         if (port == PORT_TP)
1385                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1386                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1387
1388         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1389
1390         spin_unlock_bh(&bp->phy_lock);
1391         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1392         spin_lock_bh(&bp->phy_lock);
1393
1394         return 0;
1395 }
1396
/* Program a directly-attached SerDes PHY according to the requested
 * autoneg/speed/duplex settings.  Called with bp->phy_lock held; the
 * lock is briefly dropped when bouncing the link.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
        u32 adv, bmcr;
        u32 new_adv = 0;

        /* Firmware-managed PHY: hand the request to firmware instead. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return (bnx2_setup_remote_phy(bp, port));

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                u32 new_bmcr;
                int force_link_down = 0;

                /* Forced speed: first align the 2.5G enable bit with the
                 * request; changing it requires bouncing the link.
                 */
                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                /* 0x2000 is the BMCR_SPEED100 bit; with
                                 * BMCR_SPEED1000 set above, clearing it
                                 * selects 1G.  NOTE(review): inferred
                                 * from the MII BMCR layout — confirm.
                                 */
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        /* Nothing changed: just re-resolve flow control
                         * and reprogram the MAC.
                         */
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        bnx2_test_and_enable_2g5(bp);

        /* Autoneg path: build the desired advertisement word. */
        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        /* msleep() sleeps; drop phy_lock around it. */
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}
1511
1512 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1513         (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
1514                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1515                 (ADVERTISED_1000baseT_Full)
1516
1517 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1518         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1519         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1520         ADVERTISED_1000baseT_Full)
1521
1522 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1523         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1524
1525 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1526
1527 static void
1528 bnx2_set_default_remote_link(struct bnx2 *bp)
1529 {
1530         u32 link;
1531
1532         if (bp->phy_port == PORT_TP)
1533                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1534         else
1535                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1536
1537         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1538                 bp->req_line_speed = 0;
1539                 bp->autoneg |= AUTONEG_SPEED;
1540                 bp->advertising = ADVERTISED_Autoneg;
1541                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1542                         bp->advertising |= ADVERTISED_10baseT_Half;
1543                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1544                         bp->advertising |= ADVERTISED_10baseT_Full;
1545                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1546                         bp->advertising |= ADVERTISED_100baseT_Half;
1547                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1548                         bp->advertising |= ADVERTISED_100baseT_Full;
1549                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1550                         bp->advertising |= ADVERTISED_1000baseT_Full;
1551                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1552                         bp->advertising |= ADVERTISED_2500baseX_Full;
1553         } else {
1554                 bp->autoneg = 0;
1555                 bp->advertising = 0;
1556                 bp->req_duplex = DUPLEX_FULL;
1557                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1558                         bp->req_line_speed = SPEED_10;
1559                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1560                                 bp->req_duplex = DUPLEX_HALF;
1561                 }
1562                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1563                         bp->req_line_speed = SPEED_100;
1564                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1565                                 bp->req_duplex = DUPLEX_HALF;
1566                 }
1567                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1568                         bp->req_line_speed = SPEED_1000;
1569                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1570                         bp->req_line_speed = SPEED_2500;
1571         }
1572 }
1573
1574 static void
1575 bnx2_set_default_link(struct bnx2 *bp)
1576 {
1577         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1578                 return bnx2_set_default_remote_link(bp);
1579
1580         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1581         bp->req_line_speed = 0;
1582         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1583                 u32 reg;
1584
1585                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1586
1587                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1588                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1589                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1590                         bp->autoneg = 0;
1591                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1592                         bp->req_duplex = DUPLEX_FULL;
1593                 }
1594         } else
1595                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1596 }
1597
1598 static void
1599 bnx2_send_heart_beat(struct bnx2 *bp)
1600 {
1601         u32 msg;
1602         u32 addr;
1603
1604         spin_lock(&bp->indirect_lock);
1605         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1606         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1607         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1608         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1609         spin_unlock(&bp->indirect_lock);
1610 }
1611
/* Process a link event from the remote (firmware-controlled) PHY.
 * Link state, speed, duplex and flow control are read from the
 * BNX2_LINK_STATUS word in shared memory instead of from MII registers.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect a change */
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware sets this bit to request a driver pulse. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxxHALF case sets half duplex and then falls
		 * through to the matching case to set the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Flow control: use the forced setting unless both speed
		 * and flow control are autonegotiated, in which case take
		 * the result reported by the firmware.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* Re-establish default link parameters if the port type
		 * (fibre vs. twisted pair) changed. */
		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1690
1691 static int
1692 bnx2_set_remote_link(struct bnx2 *bp)
1693 {
1694         u32 evt_code;
1695
1696         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1697         switch (evt_code) {
1698                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1699                         bnx2_remote_phy_event(bp);
1700                         break;
1701                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1702                 default:
1703                         bnx2_send_heart_beat(bp);
1704                         break;
1705         }
1706         return 0;
1707 }
1708
/* Configure the copper PHY according to bp->autoneg and the requested
 * speed/duplex settings.
 *
 * Called with bp->phy_lock held; the lock is dropped briefly around the
 * msleep() used when forcing the link down.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement (plus pause bits). */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000 Mbps advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only (re)start autoneg if the advertisement changed or
		 * autoneg is not currently enabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* The BMSR link bit is latched-low; read twice so the
		 * second read reflects the current link state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1805
1806 static int
1807 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1808 {
1809         if (bp->loopback == MAC_LOOPBACK)
1810                 return 0;
1811
1812         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1813                 return (bnx2_setup_serdes_phy(bp, port));
1814         }
1815         else {
1816                 return (bnx2_setup_copper_phy(bp));
1817         }
1818 }
1819
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709S uses a paged register layout: MII_BNX2_BLK_ADDR selects a
 * register block before that block's registers are accessed.  The
 * standard MII registers live at an offset of 0x10 in this layout, so
 * the bp->mii_* shadow offsets are remapped first.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode; disable automatic media detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the device is 2.5G capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable clause 73 BAM autoneg features. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address at the default IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1868
/* Initialize the 5708 SerDes PHY: select fiber mode, optionally enable
 * 2.5G, and apply chip-rev and board specific TX tuning taken from the
 * hardware configuration in shared memory.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with automatic media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the device supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value for backplane designs,
	 * taken from the port hardware config in shared memory. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1926
/* Initialize the 5706 SerDes PHY.  The raw 0x18/0x1c accesses are
 * Broadcom shadow/expansion registers; the magic values are
 * vendor-supplied and differ for jumbo (mtu > 1500) vs. standard
 * frames.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1963
/* Initialize the copper PHY: apply CRC and early-DAC workarounds when
 * flagged, set or clear the extended packet length bits depending on
 * MTU, and enable ethernet@wirespeed.  The raw 0x10/0x15/0x17/0x18
 * accesses are Broadcom shadow/DSP registers with vendor-supplied
 * values.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC workaround: vendor-specified DSP write sequence. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC: clear bit 8 in DSP expand register 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2014
2015
2016 static int
2017 bnx2_init_phy(struct bnx2 *bp)
2018 {
2019         u32 val;
2020         int rc = 0;
2021
2022         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2023         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2024
2025         bp->mii_bmcr = MII_BMCR;
2026         bp->mii_bmsr = MII_BMSR;
2027         bp->mii_bmsr1 = MII_BMSR;
2028         bp->mii_adv = MII_ADVERTISE;
2029         bp->mii_lpa = MII_LPA;
2030
2031         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2032
2033         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2034                 goto setup_phy;
2035
2036         bnx2_read_phy(bp, MII_PHYSID1, &val);
2037         bp->phy_id = val << 16;
2038         bnx2_read_phy(bp, MII_PHYSID2, &val);
2039         bp->phy_id |= val & 0xffff;
2040
2041         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2042                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2043                         rc = bnx2_init_5706s_phy(bp);
2044                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2045                         rc = bnx2_init_5708s_phy(bp);
2046                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2047                         rc = bnx2_init_5709s_phy(bp);
2048         }
2049         else {
2050                 rc = bnx2_init_copper_phy(bp);
2051         }
2052
2053 setup_phy:
2054         if (!rc)
2055                 rc = bnx2_setup_phy(bp, bp->phy_port);
2056
2057         return rc;
2058 }
2059
2060 static int
2061 bnx2_set_mac_loopback(struct bnx2 *bp)
2062 {
2063         u32 mac_mode;
2064
2065         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2066         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2067         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2068         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2069         bp->link_up = 1;
2070         return 0;
2071 }
2072
2073 static int bnx2_test_link(struct bnx2 *);
2074
2075 static int
2076 bnx2_set_phy_loopback(struct bnx2 *bp)
2077 {
2078         u32 mac_mode;
2079         int rc, i;
2080
2081         spin_lock_bh(&bp->phy_lock);
2082         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2083                             BMCR_SPEED1000);
2084         spin_unlock_bh(&bp->phy_lock);
2085         if (rc)
2086                 return rc;
2087
2088         for (i = 0; i < 10; i++) {
2089                 if (bnx2_test_link(bp) == 0)
2090                         break;
2091                 msleep(100);
2092         }
2093
2094         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2095         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2096                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2097                       BNX2_EMAC_MODE_25G_MODE);
2098
2099         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2100         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2101         bp->link_up = 1;
2102         return 0;
2103 }
2104
/* Synchronize with the bootcode: post msg_data (tagged with the next
 * sequence number) to the driver mailbox and poll the firmware mailbox
 * until the sequence number is acknowledged or FW_ACK_TIME_OUT_MS
 * elapses.
 *
 * Returns 0 on success and for WAIT0 messages (which need no ack),
 * -EBUSY on ack timeout (the firmware is also told about the timeout),
 * and -EIO if the firmware acked with a non-OK status.  @silent
 * suppresses the timeout printk.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require an ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2147
/* Initialize 5709 context memory: start the hardware context memory
 * init and wait for it to finish, then program the host page table
 * with the DMA address of each context block, polling each write
 * request until the hardware clears it.
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete an
 * operation in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encode the host page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Write the 64-bit block address, then trigger the page
		 * table entry write for index i. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* The hardware clears WRITE_REQ when the entry is stored. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2190
/* Zero out the on-chip context memory for all 96 contexts (pre-5709
 * chips).  On 5706 A0, certain context IDs are remapped to different
 * physical context addresses -- presumably a workaround for bad
 * on-chip memory blocks on that revision.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 remap: IDs with bit 3 set move to 0x60+. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context can span multiple physical context pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2233
/* Work around bad RX buffer memory: allocate every free mbuf from the
 * RX buffer pool, record the ones that test good (bit 9 of the address
 * clear), and free only those back -- the bad blocks stay allocated
 * forever so the hardware never uses them.
 *
 * Returns 0 on success, -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Free-register encoding -- vendor-specified; presumably
		 * duplicates the mbuf id plus a valid bit. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2284
2285 static void
2286 bnx2_set_mac_addr(struct bnx2 *bp)
2287 {
2288         u32 val;
2289         u8 *mac_addr = bp->dev->dev_addr;
2290
2291         val = (mac_addr[0] << 8) | mac_addr[1];
2292
2293         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2294
2295         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2296                 (mac_addr[4] << 8) | mac_addr[5];
2297
2298         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2299 }
2300
2301 static inline int
2302 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2303 {
2304         dma_addr_t mapping;
2305         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2306         struct rx_bd *rxbd =
2307                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2308         struct page *page = alloc_page(GFP_ATOMIC);
2309
2310         if (!page)
2311                 return -ENOMEM;
2312         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2313                                PCI_DMA_FROMDEVICE);
2314         rx_pg->page = page;
2315         pci_unmap_addr_set(rx_pg, mapping, mapping);
2316         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2317         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2318         return 0;
2319 }
2320
2321 static void
2322 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2323 {
2324         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2325         struct page *page = rx_pg->page;
2326
2327         if (!page)
2328                 return;
2329
2330         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2331                        PCI_DMA_FROMDEVICE);
2332
2333         __free_page(page);
2334         rx_pg->page = NULL;
2335 }
2336
2337 static inline int
2338 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2339 {
2340         struct sk_buff *skb;
2341         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2342         dma_addr_t mapping;
2343         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2344         unsigned long align;
2345
2346         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2347         if (skb == NULL) {
2348                 return -ENOMEM;
2349         }
2350
2351         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2352                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2353
2354         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2355                 PCI_DMA_FROMDEVICE);
2356
2357         rx_buf->skb = skb;
2358         pci_unmap_addr_set(rx_buf, mapping, mapping);
2359
2360         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2361         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2362
2363         bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2364
2365         return 0;
2366 }
2367
2368 static int
2369 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2370 {
2371         struct status_block *sblk = bnapi->status_blk;
2372         u32 new_link_state, old_link_state;
2373         int is_set = 1;
2374
2375         new_link_state = sblk->status_attn_bits & event;
2376         old_link_state = sblk->status_attn_bits_ack & event;
2377         if (new_link_state != old_link_state) {
2378                 if (new_link_state)
2379                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2380                 else
2381                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2382         } else
2383                 is_set = 0;
2384
2385         return is_set;
2386 }
2387
2388 static void
2389 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2390 {
2391         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2392                 spin_lock(&bp->phy_lock);
2393                 bnx2_set_link(bp);
2394                 spin_unlock(&bp->phy_lock);
2395         }
2396         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2397                 bnx2_set_remote_link(bp);
2398
2399 }
2400
2401 static inline u16
2402 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2403 {
2404         u16 cons;
2405
2406         if (bnapi->int_num == 0)
2407                 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2408         else
2409                 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2410
2411         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2412                 cons++;
2413         return cons;
2414 }
2415
/* Reclaim completed TX descriptors, up to @budget packets.
 *
 * Walks the TX ring from the software consumer index to the hardware
 * consumer index, unmapping and freeing each completed skb, then wakes
 * the TX queue if it was stopped and enough descriptors are now free.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Compute the index of this packet's last BD and
			 * stop if the hardware has not consumed past it. */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap one BD per skb fragment. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bnapi->hw_tx_cons = hw_cons;
	bnapi->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the TX lock to avoid racing with xmit. */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2497
/* Return @count page buffers from the rx page ring back to the hardware
 * producer without allocating replacements.  If @skb is non-NULL, its last
 * page fragment is detached and remapped to refill the current consumer
 * slot before the skb is freed.  Used on error/abort paths for split
 * (jumbo) packets.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bnapi->rx_pg_prod, prod;
	u16 cons = bnapi->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Steal the skb's last page fragment and remap it
			 * for device DMA so the consumer slot is refilled;
			 * the skb itself is no longer needed after this.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the page, its DMA mapping and the BD host
			 * address from the consumer to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bnapi->rx_pg_prod = hw_prod;
	bnapi->rx_pg_cons = cons;
}
2547
/* Recycle the rx buffer at consumer index @cons into producer slot @prod
 * without allocating a replacement.  Used on error paths and after the
 * packet data has been copied out of the buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the buffer back to the device; only the header area was
	 * synced for the CPU in bnx2_rx_int(), so only that much needs to
	 * be synced back.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD address are already in place. */
	if (cons == prod)
		return;

	/* Transfer the DMA mapping and BD host address to the producer
	 * slot.
	 */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2577
/* Finish reception of one packet into @skb.  For split/jumbo packets
 * (@hdr_len != 0) the first @hdr_len bytes live in the linear buffer and
 * the remainder is gathered from the rx page ring as page fragments.
 * @ring_idx packs the ring consumer index in the high 16 bits and the
 * producer index in the low 16 bits.
 *
 * Returns 0 on success or a negative errno; on failure the buffer (and
 * any pages) are recycled back to the rings.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Allocate a replacement buffer first; if that fails, recycle the
	 * current buffer and pages instead of completing the packet.
	 */
	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* raw_len restores the 4-byte trailer the caller
			 * stripped (presumably the FCS — see len -= 4 in
			 * bnx2_rx_int()).
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bnapi->rx_pg_cons;
		u16 pg_prod = bnapi->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only (part of) the 4-byte trailer is left
				 * on this page: trim it from what we have
				 * already attached and recycle the unused
				 * pages rather than attaching a fragment.
				 */
				unsigned int tail = 4 - frag_len;

				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* The last fragment carries the 4-byte trailer;
			 * drop it from the fragment length.
			 */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Refill the page slot; on failure, return this
			 * packet's pages (including the one just attached,
			 * via @skb) to the ring and drop the packet.
			 */
			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bnapi->rx_pg_prod = pg_prod;
		bnapi->rx_pg_cons = pg_cons;
	}
	return 0;
}
2669
2670 static inline u16
2671 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2672 {
2673         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2674
2675         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2676                 cons++;
2677         return cons;
2678 }
2679
/* NAPI rx processing: walk the rx ring from the software consumer up to
 * the hardware consumer, completing at most @budget packets.  Small
 * packets are copied into a fresh skb and the original buffer recycled;
 * large/split packets go through bnx2_rx_skb().  Finishes by writing the
 * new producer indices to the chip mailbox registers.
 *
 * Returns the number of packets passed up the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bnapi->rx_cons;
	sw_prod = bnapi->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the l2_fhdr + header area for the CPU; the
		 * rest is synced implicitly on unmap or copied via pages.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status header to the frame. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Drop frames with any rx error bits; recycle the buffer. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		/* Determine how much of the frame is in the linear buffer;
		 * anything beyond hdr_len is in the page ring.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4-byte trailer included in l2_fhdr_pkt_len
		 * (presumably the frame CRC).
		 */
		len -= 4;

		/* Copy-break: small packets are copied into a new skb so
		 * the large rx buffer can be recycled immediately.
		 */
		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, bnapi, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Honor the hardware checksum result for TCP/UDP frames
		 * when rx checksum offload is enabled.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bnapi->rx_cons = sw_cons;
	bnapi->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip mailboxes. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bnapi->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2825
2826 /* MSI ISR - The only difference between this and the INTx ISR
2827  * is that the MSI interrupt is always serviced.
2828  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	prefetch(bnapi->status_blk);
	/* Mask further interrupts; they are re-enabled by the ack write
	 * in bnx2_poll() when polling completes.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2849
/* One-shot MSI ISR.  Unlike bnx2_msi(), no explicit mask write is done
 * here — presumably the one-shot MSI hardware mode masks the interrupt
 * automatically (confirm against the chip documentation).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	prefetch(bnapi->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2867
/* INTx (shared line) ISR.  Must distinguish our interrupts from other
 * devices on the line and cope with the status block DMA racing the
 * interrupt delivery.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are about to process so bnx2_poll()
	 * can ack exactly this much work.
	 */
	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2907
/* ISR for the dedicated tx MSI-X vector; schedules bnx2_tx_poll(). */
static irqreturn_t
bnx2_tx_msix(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];

	prefetch(bnapi->status_blk_msix);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);
	return IRQ_HANDLED;
}
2924
2925 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2926                                  STATUS_ATTN_BITS_TIMER_ABORT)
2927
2928 static inline int
2929 bnx2_has_work(struct bnx2_napi *bnapi)
2930 {
2931         struct status_block *sblk = bnapi->status_blk;
2932
2933         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2934             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2935                 return 1;
2936
2937         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2938             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2939                 return 1;
2940
2941         return 0;
2942 }
2943
/* NAPI poll handler for the dedicated tx MSI-X vector: reap tx
 * completions until the ring is drained or @budget is exhausted, then
 * ack the processed status index and re-enable the vector's interrupt.
 */
static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk_msix;

	do {
		work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
		if (unlikely(work_done >= budget))
			return work_done;

		/* Record the status index before re-checking for work so
		 * a completion posted in between is seen by the loop and
		 * not silently acked.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
	} while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);

	netif_rx_complete(bp->dev, napi);
	/* Ack the processed work and unmask this vector. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       bnapi->last_status_idx);
	return work_done;
}
2966
/* One pass of combined NAPI work: service attention events (link
 * changes), reap tx completions, and process rx packets within the
 * remaining budget.  Returns the updated rx work count; tx work is
 * unbudgeted here (budget 0 = unlimited in bnx2_tx_int()).
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct status_block *sblk = bnapi->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* A mismatch between the attention bits and their acks means an
	 * unhandled event (link state / timer abort).
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
2995
/* Main NAPI poll handler: loop over bnx2_poll_work() until either the
 * budget is exhausted or no work remains, then complete NAPI and
 * re-enable interrupts with the appropriate ack sequence.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI/MSI-X: a single ack write re-enables the
			 * interrupt.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack first with the line still masked, then
			 * again unmasked to re-enable — NOTE(review): this
			 * double write looks like a chip workaround for
			 * shared INTx; confirm against chip errata.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3037
3038 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3039  * from set_multicast.
3040  */
/* Program the chip's rx filtering (promiscuous / all-multicast /
 * multicast hash) from the net_device flags and multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when a VLAN group is
	 * registered and ASF management firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit in the hash table. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address: low CRC byte selects one of 256 bits
		 * spread over the hash registers (top 3 bits pick the
		 * register, low 5 bits pick the bit).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable the sort rule, program it, then re-enable it. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3112
3113 static void
3114 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3115         u32 rv2p_proc)
3116 {
3117         int i;
3118         u32 val;
3119
3120
3121         for (i = 0; i < rv2p_code_len; i += 8) {
3122                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3123                 rv2p_code++;
3124                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3125                 rv2p_code++;
3126
3127                 if (rv2p_proc == RV2P_PROC1) {
3128                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3129                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3130                 }
3131                 else {
3132                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3133                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3134                 }
3135         }
3136
3137         /* Reset the processor, un-stall is done later. */
3138         if (rv2p_proc == RV2P_PROC1) {
3139                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3140         }
3141         else {
3142                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3143         }
3144 }
3145
3146 static int
3147 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3148 {
3149         u32 offset;
3150         u32 val;
3151         int rc;
3152
3153         /* Halt the CPU. */
3154         val = REG_RD_IND(bp, cpu_reg->mode);
3155         val |= cpu_reg->mode_value_halt;
3156         REG_WR_IND(bp, cpu_reg->mode, val);
3157         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3158
3159         /* Load the Text area. */
3160         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3161         if (fw->gz_text) {
3162                 int j;
3163
3164                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3165                                        fw->gz_text_len);
3166                 if (rc < 0)
3167                         return rc;
3168
3169                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3170                         REG_WR_IND(bp, offset, le32_to_cpu(fw->text[j]));
3171                 }
3172         }
3173
3174         /* Load the Data area. */
3175         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3176         if (fw->data) {
3177                 int j;
3178
3179                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3180                         REG_WR_IND(bp, offset, fw->data[j]);
3181                 }
3182         }
3183
3184         /* Load the SBSS area. */
3185         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3186         if (fw->sbss_len) {
3187                 int j;
3188
3189                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3190                         REG_WR_IND(bp, offset, 0);
3191                 }
3192         }
3193
3194         /* Load the BSS area. */
3195         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3196         if (fw->bss_len) {
3197                 int j;
3198
3199                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3200                         REG_WR_IND(bp, offset, 0);
3201                 }
3202         }
3203
3204         /* Load the Read-Only area. */
3205         offset = cpu_reg->spad_base +
3206                 (fw->rodata_addr - cpu_reg->mips_view_base);
3207         if (fw->rodata) {
3208                 int j;
3209
3210                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3211                         REG_WR_IND(bp, offset, fw->rodata[j]);
3212                 }
3213         }
3214
3215         /* Clear the pre-fetch instruction. */
3216         REG_WR_IND(bp, cpu_reg->inst, 0);
3217         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3218
3219         /* Start the CPU. */
3220         val = REG_RD_IND(bp, cpu_reg->mode);
3221         val &= ~cpu_reg->mode_value_halt;
3222         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3223         REG_WR_IND(bp, cpu_reg->mode, val);
3224
3225         return 0;
3226 }
3227
/* Load firmware into, and start, all of the chip's on-board processors:
 * the two RV2P (receive-path) engines plus the RX, TX, TX patch-up,
 * completion and command MIPS CPUs.
 *
 * The firmware images are stored zlib-compressed; a single FW_BUF_SIZE
 * scratch buffer is vmalloc'ed, reused as the decompression target for
 * every image, and freed before returning.
 *
 * Returns 0 on success, -ENOMEM if the scratch buffer cannot be
 * allocated, or a negative error from decompression / firmware load.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	/* The 5709 (Xinan) has its own image set; older chips share one. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	/* zlib_inflate_blob() returns the decompressed length on success
	 * (hence the rc == len usage below) or a negative error. */
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	/* Same again for the second RV2P engine. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor.  Each MIPS CPU below is described
	 * by the same cpu_reg template filled with its own register
	 * addresses, then loaded via load_cpu_fw(). */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* The shared scratch buffer receives each decompressed text
	 * section inside load_cpu_fw(). */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);

init_cpu_err:
	/* Scratch buffer is not referenced after the loads complete. */
	vfree(text);
	return rc;
}
3388
/* Transition the chip between PCI power states.  Only PCI_D0 (fully
 * powered) and PCI_D3hot (suspended, optionally armed for Wake-on-LAN)
 * are supported; any other state returns -EINVAL.
 *
 * For D3hot with WOL enabled, the MAC is reprogrammed to receive magic
 * packets (renegotiating copper links down to 10/100 first) before the
 * PMCSR power-state bits are written.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Enter D0 and acknowledge any latched PME status
		 * (PME_STATUS is write-one-to-clear). */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI wake packet and leave
		 * magic-packet (WOL) mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save autoneg settings; on copper, renegotiate
			 * down to 10/100 for the suspended link, then
			 * restore the saved values afterwards. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort rule 0 with broadcast + multicast
			 * enables (NOTE(review): bit 0 presumably selects
			 * the perfect-match MAC entry — confirm against
			 * the register spec).  Clear, write, then enable. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (with/without WOL). */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* On 5706 A0/A1 the D3hot state bits (3) are written only
		 * when WOL is on; all other chips always enter D3hot. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3525
3526 static int
3527 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3528 {
3529         u32 val;
3530         int j;
3531
3532         /* Request access to the flash interface. */
3533         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3534         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3535                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3536                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3537                         break;
3538
3539                 udelay(5);
3540         }
3541
3542         if (j >= NVRAM_TIMEOUT_COUNT)
3543                 return -EBUSY;
3544
3545         return 0;
3546 }
3547
3548 static int
3549 bnx2_release_nvram_lock(struct bnx2 *bp)
3550 {
3551         int j;
3552         u32 val;
3553
3554         /* Relinquish nvram interface. */
3555         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3556
3557         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3558                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3559                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3560                         break;
3561
3562                 udelay(5);
3563         }
3564
3565         if (j >= NVRAM_TIMEOUT_COUNT)
3566                 return -EBUSY;
3567
3568         return 0;
3569 }
3570
3571
3572 static int
3573 bnx2_enable_nvram_write(struct bnx2 *bp)
3574 {
3575         u32 val;
3576
3577         val = REG_RD(bp, BNX2_MISC_CFG);
3578         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3579
3580         if (bp->flash_info->flags & BNX2_NV_WREN) {
3581                 int j;
3582
3583                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3584                 REG_WR(bp, BNX2_NVM_COMMAND,
3585                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3586
3587                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3588                         udelay(5);
3589
3590                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3591                         if (val & BNX2_NVM_COMMAND_DONE)
3592                                 break;
3593                 }
3594
3595                 if (j >= NVRAM_TIMEOUT_COUNT)
3596                         return -EBUSY;
3597         }
3598         return 0;
3599 }
3600
3601 static void
3602 bnx2_disable_nvram_write(struct bnx2 *bp)
3603 {
3604         u32 val;
3605
3606         val = REG_RD(bp, BNX2_MISC_CFG);
3607         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3608 }
3609
3610
3611 static void
3612 bnx2_enable_nvram_access(struct bnx2 *bp)
3613 {
3614         u32 val;
3615
3616         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3617         /* Enable both bits, even on read. */
3618         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3619                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3620 }
3621
3622 static void
3623 bnx2_disable_nvram_access(struct bnx2 *bp)
3624 {
3625         u32 val;
3626
3627         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3628         /* Disable both bits, even after read. */
3629         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3630                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3631                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3632 }
3633
3634 static int
3635 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3636 {
3637         u32 cmd;
3638         int j;
3639
3640         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3641                 /* Buffered flash, no erase needed */
3642                 return 0;
3643
3644         /* Build an erase command */
3645         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3646               BNX2_NVM_COMMAND_DOIT;
3647
3648         /* Need to clear DONE bit separately. */
3649         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3650
3651         /* Address of the NVRAM to read from. */
3652         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3653
3654         /* Issue an erase command. */
3655         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3656
3657         /* Wait for completion. */
3658         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3659                 u32 val;
3660
3661                 udelay(5);
3662
3663                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3664                 if (val & BNX2_NVM_COMMAND_DONE)
3665                         break;
3666         }
3667
3668         if (j >= NVRAM_TIMEOUT_COUNT)
3669                 return -EBUSY;
3670
3671         return 0;
3672 }
3673
3674 static int
3675 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3676 {
3677         u32 cmd;
3678         int j;
3679
3680         /* Build the command word. */
3681         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3682
3683         /* Calculate an offset of a buffered flash, not needed for 5709. */
3684         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3685                 offset = ((offset / bp->flash_info->page_size) <<
3686                            bp->flash_info->page_bits) +
3687                           (offset % bp->flash_info->page_size);
3688         }
3689
3690         /* Need to clear DONE bit separately. */
3691         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3692
3693         /* Address of the NVRAM to read from. */
3694         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3695
3696         /* Issue a read command. */
3697         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3698
3699         /* Wait for completion. */
3700         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3701                 u32 val;
3702
3703                 udelay(5);
3704
3705                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3706                 if (val & BNX2_NVM_COMMAND_DONE) {
3707                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3708                         memcpy(ret_val, &v, 4);
3709                         break;
3710                 }
3711         }
3712         if (j >= NVRAM_TIMEOUT_COUNT)
3713                 return -EBUSY;
3714
3715         return 0;
3716 }
3717
3718
3719 static int
3720 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3721 {
3722         u32 cmd;
3723         __be32 val32;
3724         int j;
3725
3726         /* Build the command word. */
3727         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3728
3729         /* Calculate an offset of a buffered flash, not needed for 5709. */
3730         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3731                 offset = ((offset / bp->flash_info->page_size) <<
3732                           bp->flash_info->page_bits) +
3733                          (offset % bp->flash_info->page_size);
3734         }
3735
3736         /* Need to clear DONE bit separately. */
3737         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3738
3739         memcpy(&val32, val, 4);
3740
3741         /* Write the data. */
3742         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3743
3744         /* Address of the NVRAM to write to. */
3745         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3746
3747         /* Issue the write command. */
3748         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3749
3750         /* Wait for completion. */
3751         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3752                 udelay(5);
3753
3754                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3755                         break;
3756         }
3757         if (j >= NVRAM_TIMEOUT_COUNT)
3758                 return -EBUSY;
3759
3760         return 0;
3761 }
3762
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info.
 *
 * The 5709 always uses the fixed flash_5709 descriptor.  Older chips
 * are matched against flash_table[] using the strapping bits read from
 * NVM_CFG1; when the interface has not been reconfigured yet, the
 * matching entry's config registers are programmed under the NVRAM
 * lock.  Finally bp->flash_size is taken from the size the bootcode
 * published in shared memory, falling back to the table entry's total.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or an error
 * from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* The 5709 has a single fixed flash interface; skip strap
	 * decoding entirely. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup strapping bits only. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which set of strapping bits to decode. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either loop leaves j == entry_count when nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size published by the bootcode in shared
	 * memory; fall back to the table entry's total size. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3845
3846 static int
3847 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3848                 int buf_size)
3849 {
3850         int rc = 0;
3851         u32 cmd_flags, offset32, len32, extra;
3852
3853         if (buf_size == 0)
3854                 return 0;
3855
3856         /* Request access to the flash interface. */
3857         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3858                 return rc;
3859
3860         /* Enable access to flash interface */
3861         bnx2_enable_nvram_access(bp);
3862
3863         len32 = buf_size;
3864         offset32 = offset;
3865         extra = 0;
3866
3867         cmd_flags = 0;
3868
3869         if (offset32 & 3) {
3870                 u8 buf[4];
3871                 u32 pre_len;
3872
3873                 offset32 &= ~3;
3874                 pre_len = 4 - (offset & 3);
3875
3876                 if (pre_len >= len32) {
3877                         pre_len = len32;
3878                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3879                                     BNX2_NVM_COMMAND_LAST;
3880                 }
3881                 else {
3882                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3883                 }
3884
3885                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3886
3887                 if (rc)
3888                         return rc;
3889
3890                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3891
3892                 offset32 += 4;
3893                 ret_buf += pre_len;
3894                 len32 -= pre_len;
3895         }
3896         if (len32 & 3) {
3897                 extra = 4 - (len32 & 3);
3898                 len32 = (len32 + 4) & ~3;
3899         }
3900
3901         if (len32 == 4) {
3902                 u8 buf[4];
3903
3904                 if (cmd_flags)
3905                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3906                 else
3907                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3908                                     BNX2_NVM_COMMAND_LAST;
3909
3910                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3911
3912                 memcpy(ret_buf, buf, 4 - extra);
3913         }
3914         else if (len32 > 0) {
3915                 u8 buf[4];
3916
3917                 /* Read the first word. */
3918                 if (cmd_flags)
3919                         cmd_flags = 0;
3920                 else
3921                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3922
3923                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3924
3925                 /* Advance to the next dword. */
3926                 offset32 += 4;
3927                 ret_buf += 4;
3928                 len32 -= 4;
3929
3930                 while (len32 > 4 && rc == 0) {
3931                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3932
3933                         /* Advance to the next dword. */
3934                         offset32 += 4;
3935                         ret_buf += 4;
3936                         len32 -= 4;
3937                 }
3938
3939                 if (rc)
3940                         return rc;
3941
3942                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3943                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3944
3945                 memcpy(ret_buf, buf, 4 - extra);
3946         }
3947
3948         /* Disable access to flash interface */
3949         bnx2_disable_nvram_access(bp);
3950
3951         bnx2_release_nvram_lock(bp);
3952
3953         return rc;
3954 }
3955
3956 static int
3957 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3958                 int buf_size)
3959 {
3960         u32 written, offset32, len32;
3961         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3962         int rc = 0;
3963         int align_start, align_end;
3964
3965         buf = data_buf;
3966         offset32 = offset;
3967         len32 = buf_size;
3968         align_start = align_end = 0;
3969
3970         if ((align_start = (offset32 & 3))) {
3971                 offset32 &= ~3;
3972                 len32 += align_start;
3973                 if (len32 < 4)
3974                         len32 = 4;
3975                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3976                         return rc;
3977         }
3978
3979         if (len32 & 3) {
3980                 align_end = 4 - (len32 & 3);
3981                 len32 += align_end;
3982                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3983                         return rc;
3984         }
3985
3986         if (align_start || align_end) {
3987                 align_buf = kmalloc(len32, GFP_KERNEL);
3988                 if (align_buf == NULL)
3989                         return -ENOMEM;
3990                 if (align_start) {
3991                         memcpy(align_buf, start, 4);
3992                 }
3993                 if (align_end) {
3994                         memcpy(align_buf + len32 - 4, end, 4);
3995                 }
3996                 memcpy(align_buf + align_start, data_buf, buf_size);
3997                 buf = align_buf;
3998         }
3999
4000         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4001                 flash_buffer = kmalloc(264, GFP_KERNEL);
4002                 if (flash_buffer == NULL) {
4003                         rc = -ENOMEM;
4004                         goto nvram_write_end;
4005                 }
4006         }
4007
4008         written = 0;
4009         while ((written < len32) && (rc == 0)) {
4010                 u32 page_start, page_end, data_start, data_end;
4011                 u32 addr, cmd_flags;
4012                 int i;
4013
4014                 /* Find the page_start addr */
4015                 page_start = offset32 + written;
4016                 page_start -= (page_start % bp->flash_info->page_size);
4017                 /* Find the page_end addr */
4018                 page_end = page_start + bp->flash_info->page_size;
4019                 /* Find the data_start addr */
4020                 data_start = (written == 0) ? offset32 : page_start;
4021                 /* Find the data_end addr */
4022                 data_end = (page_end > offset32 + len32) ?
4023                         (offset32 + len32) : page_end;
4024
4025                 /* Request access to the flash interface. */
4026                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4027                         goto nvram_write_end;
4028
4029                 /* Enable access to flash interface */
4030                 bnx2_enable_nvram_access(bp);
4031
4032                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4033                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4034                         int j;
4035
4036                         /* Read the whole page into the buffer
4037                          * (non-buffer flash only) */
4038                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4039                                 if (j == (bp->flash_info->page_size - 4)) {
4040                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4041                                 }
4042                                 rc = bnx2_nvram_read_dword(bp,
4043                                         page_start + j,
4044                                         &flash_buffer[j],
4045                                         cmd_flags);
4046
4047                                 if (rc)
4048                                         goto nvram_write_end;
4049
4050                                 cmd_flags = 0;
4051                         }
4052                 }
4053
4054                 /* Enable writes to flash interface (unlock write-protect) */
4055                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4056                         goto nvram_write_end;
4057
4058                 /* Loop to write back the buffer data from page_start to
4059                  * data_start */
4060                 i = 0;
4061                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4062                         /* Erase the page */
4063                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4064                                 goto nvram_write_end;
4065
4066                         /* Re-enable the write again for the actual write */
4067                         bnx2_enable_nvram_write(bp);
4068
4069                         for (addr = page_start; addr < data_start;
4070                                 addr += 4, i += 4) {
4071
4072                                 rc = bnx2_nvram_write_dword(bp, addr,
4073                                         &flash_buffer[i], cmd_flags);
4074
4075                                 if (rc != 0)
4076                                         goto nvram_write_end;
4077
4078                                 cmd_flags = 0;
4079                         }
4080                 }
4081
4082                 /* Loop to write the new data from data_start to data_end */
4083                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4084                         if ((addr == page_end - 4) ||
4085                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4086                                  (addr == data_end - 4))) {
4087
4088                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4089                         }
4090                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4091                                 cmd_flags);
4092
4093                         if (rc != 0)
4094                                 goto nvram_write_end;
4095
4096                         cmd_flags = 0;
4097                         buf += 4;
4098                 }
4099
4100                 /* Loop to write back the buffer data from data_end
4101                  * to page_end */
4102                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4103                         for (addr = data_end; addr < page_end;
4104                                 addr += 4, i += 4) {
4105
4106                                 if (addr == page_end-4) {
4107                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4108                                 }
4109                                 rc = bnx2_nvram_write_dword(bp, addr,
4110                                         &flash_buffer[i], cmd_flags);
4111
4112                                 if (rc != 0)
4113                                         goto nvram_write_end;
4114
4115                                 cmd_flags = 0;
4116                         }
4117                 }
4118
4119                 /* Disable writes to flash interface (lock write-protect) */
4120                 bnx2_disable_nvram_write(bp);
4121
4122                 /* Disable access to flash interface */
4123                 bnx2_disable_nvram_access(bp);
4124                 bnx2_release_nvram_lock(bp);
4125
4126                 /* Increment written */
4127                 written += data_end - data_start;
4128         }
4129
4130 nvram_write_end:
4131         kfree(flash_buffer);
4132         kfree(align_buf);
4133         return rc;
4134 }
4135
4136 static void
4137 bnx2_init_remote_phy(struct bnx2 *bp)
4138 {
4139         u32 val;
4140
4141         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4142         if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4143                 return;
4144
4145         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4146         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4147                 return;
4148
4149         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4150                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4151
4152                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4153                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4154                         bp->phy_port = PORT_FIBRE;
4155                 else
4156                         bp->phy_port = PORT_TP;
4157
4158                 if (netif_running(bp->dev)) {
4159                         u32 sig;
4160
4161                         if (val & BNX2_LINK_STATUS_LINK_UP) {
4162                                 bp->link_up = 1;
4163                                 netif_carrier_on(bp->dev);
4164                         } else {
4165                                 bp->link_up = 0;
4166                                 netif_carrier_off(bp->dev);
4167                         }
4168                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4169                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4170                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4171                                    sig);
4172                 }
4173         }
4174 }
4175
/* Point two GRC windows at the MSI-X vector table and pending-bit
 * array so they are reachable through the register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Use separately programmable windows rather than one linear one. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4184
/* Soft-reset the chip and wait for the bootcode to finish its own
 * re-initialization.  reset_code tells the firmware why the reset is
 * happening.  Returns 0 on success or a negative errno if the reset
 * or the firmware handshake fails.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through the MISC command register; the
		 * read-back flushes the posted write before the delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Restore window enable and word-swap settings via config
		 * space, which survives the core reset. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote PHY support and, if the port type changed,
	 * reprogram the default link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset wiped the GRC windows; reprogram them for MSI-X. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4290
/* Bring the chip to an operational state after a reset: DMA config,
 * on-chip CPUs and contexts, MAC address, MTU, status/statistics block
 * DMA addresses, and host-coalescing (interrupt mitigation) setup.
 * Returns 0, or a negative errno from CPU init or the firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the relaxed-ordering enable bit on PCI-X. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* NOTE(review): HALT_DIS looks like a 5709 A0/A1 errata
	 * workaround -- confirm against Broadcom errata docs. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff engine from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the interrupt-time value
	 * in the high 16 bits and the normal value in the low 16 bits. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		/* Give the TX vector its own status block with its own
		 * tx coalescing parameters. */
		u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; the read-back flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host-coalescing command register for later use. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4491
4492 static void
4493 bnx2_clear_ring_states(struct bnx2 *bp)
4494 {
4495         struct bnx2_napi *bnapi;
4496         int i;
4497
4498         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4499                 bnapi = &bp->bnx2_napi[i];
4500
4501                 bnapi->tx_cons = 0;
4502                 bnapi->hw_tx_cons = 0;
4503                 bnapi->rx_prod_bseq = 0;
4504                 bnapi->rx_prod = 0;
4505                 bnapi->rx_cons = 0;
4506                 bnapi->rx_pg_prod = 0;
4507                 bnapi->rx_pg_cons = 0;
4508         }
4509 }
4510
4511 static void
4512 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4513 {
4514         u32 val, offset0, offset1, offset2, offset3;
4515
4516         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4517                 offset0 = BNX2_L2CTX_TYPE_XI;
4518                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4519                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4520                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4521         } else {
4522                 offset0 = BNX2_L2CTX_TYPE;
4523                 offset1 = BNX2_L2CTX_CMD_TYPE;
4524                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4525                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4526         }
4527         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4528         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4529
4530         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4531         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4532
4533         val = (u64) bp->tx_desc_mapping >> 32;
4534         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4535
4536         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4537         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4538 }
4539
4540 static void
4541 bnx2_init_tx_ring(struct bnx2 *bp)
4542 {
4543         struct tx_bd *txbd;
4544         u32 cid = TX_CID;
4545         struct bnx2_napi *bnapi;
4546
4547         bp->tx_vec = 0;
4548         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4549                 cid = TX_TSS_CID;
4550                 bp->tx_vec = BNX2_TX_VEC;
4551                 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4552                        (TX_TSS_CID << 7));
4553         }
4554         bnapi = &bp->bnx2_napi[bp->tx_vec];
4555
4556         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4557
4558         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4559
4560         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4561         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4562
4563         bp->tx_prod = 0;
4564         bp->tx_prod_bseq = 0;
4565
4566         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4567         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4568
4569         bnx2_init_tx_context(bp, cid);
4570 }
4571
4572 static void
4573 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4574                      int num_rings)
4575 {
4576         int i;
4577         struct rx_bd *rxbd;
4578
4579         for (i = 0; i < num_rings; i++) {
4580                 int j;
4581
4582                 rxbd = &rx_ring[i][0];
4583                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4584                         rxbd->rx_bd_len = buf_size;
4585                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4586                 }
4587                 if (i == (num_rings - 1))
4588                         j = 0;
4589                 else
4590                         j = i + 1;
4591                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4592                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4593         }
4594 }
4595
/* Program the RX (and optional jumbo page) ring contexts and fill the
 * rings with receive buffers.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	/* Zero the page-buffer size; it stays zero unless the jumbo
	 * page ring is in use. */
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		/* Base address of the first page-ring descriptor page. */
		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	/* Base address of the first RX descriptor page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post pages on the jumbo page ring; stop early on allocation
	 * failure and run with whatever was posted. */
	ring_prod = prod = bnapi->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bnapi->rx_pg_prod = prod;

	/* Post skbs on the main RX ring, likewise best-effort. */
	ring_prod = prod = bnapi->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bnapi->rx_prod = prod;

	/* Publish the producer indices and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
		 bnapi->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
4663
4664 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4665 {
4666         u32 max, num_rings = 1;
4667
4668         while (ring_size > MAX_RX_DESC_CNT) {
4669                 ring_size -= MAX_RX_DESC_CNT;
4670                 num_rings++;
4671         }
4672         /* round to next power of 2 */
4673         max = max_size;
4674         while ((max & num_rings) == 0)
4675                 max >>= 1;
4676
4677         if (num_rings != max)
4678                 max <<= 1;
4679
4680         return max;
4681 }
4682
/* Compute RX buffer sizes and ring geometry for the requested ring
 * size, switching to the jumbo page ring when a full frame's skb no
 * longer fits in a single page allocation.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;

	/* Total skb footprint including alignment and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		/* Size the page ring to `pages` pages per RX entry,
		 * capped at the hardware maximum. */
		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* With paged RX the skb itself only needs to hold the
		 * copy-threshold worth of header bytes. */
		rx_size = RX_COPY_THRESH + bp->rx_offset;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4721
/* Free all skbs still queued on the TX ring, unmapping their DMA
 * buffers first.  An skb occupies one descriptor for the linear head
 * plus one per page fragment; the loop skips over all of them at once.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* First descriptor maps the skb's linear data. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Following descriptors map the page fragments. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past this skb's head + fragment descriptors. */
		i += j + 1;
	}

}
4758
4759 static void
4760 bnx2_free_rx_skbs(struct bnx2 *bp)
4761 {
4762         int i;
4763
4764         if (bp->rx_buf_ring == NULL)
4765                 return;
4766
4767         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4768                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4769                 struct sk_buff *skb = rx_buf->skb;
4770
4771                 if (skb == NULL)
4772                         continue;
4773
4774                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4775                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4776
4777                 rx_buf->skb = NULL;
4778
4779                 dev_kfree_skb(skb);
4780         }
4781         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4782                 bnx2_free_rx_page(bp, i);
4783 }
4784
/* Release every TX and RX buffer still owned by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4791
4792 static int
4793 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4794 {
4795         int rc;
4796
4797         rc = bnx2_reset_chip(bp, reset_code);
4798         bnx2_free_skbs(bp);
4799         if (rc)
4800                 return rc;
4801
4802         if ((rc = bnx2_init_chip(bp)) != 0)
4803                 return rc;
4804
4805         bnx2_clear_ring_states(bp);
4806         bnx2_init_tx_ring(bp);
4807         bnx2_init_rx_ring(bp);
4808         return 0;
4809 }
4810
4811 static int
4812 bnx2_init_nic(struct bnx2 *bp)
4813 {
4814         int rc;
4815
4816         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4817                 return rc;
4818
4819         spin_lock_bh(&bp->phy_lock);
4820         bnx2_init_phy(bp);
4821         bnx2_set_link(bp);
4822         spin_unlock_bh(&bp->phy_lock);
4823         return 0;
4824 }
4825
4826 static int
4827 bnx2_test_registers(struct bnx2 *bp)
4828 {
4829         int ret;
4830         int i, is_5709;
4831         static const struct {
4832                 u16   offset;
4833                 u16   flags;
4834 #define BNX2_FL_NOT_5709        1
4835                 u32   rw_mask;
4836                 u32   ro_mask;
4837         } reg_tbl[] = {
4838                 { 0x006c, 0, 0x00000000, 0x0000003f },
4839                 { 0x0090, 0, 0xffffffff, 0x00000000 },
4840                 { 0x0094, 0, 0x00000000, 0x00000000 },
4841
4842                 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4843                 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4844                 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4845                 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4846                 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4847                 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4848                 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4849                 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4850                 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4851
4852                 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4853                 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4854                 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4855                 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4856                 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4857                 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4858
4859                 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4860                 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4861                 { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
4862
4863                 { 0x1000, 0, 0x00000000, 0x00000001 },
4864                 { 0x1004, 0, 0x00000000, 0x000f0001 },
4865
4866                 { 0x1408, 0, 0x01c00800, 0x00000000 },
4867                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4868                 { 0x14a8, 0, 0x00000000, 0x000001ff },
4869                 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4870                 { 0x14b0, 0, 0x00000002, 0x00000001 },
4871                 { 0x14b8, 0, 0x00000000, 0x00000000 },
4872                 { 0x14c0, 0, 0x00000000, 0x00000009 },
4873                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4874                 { 0x14cc, 0, 0x00000000, 0x00000001 },
4875                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4876
4877                 { 0x1800, 0, 0x00000000, 0x00000001 },
4878                 { 0x1804, 0, 0x00000000, 0x00000003 },
4879
4880                 { 0x2800, 0, 0x00000000, 0x00000001 },
4881                 { 0x2804, 0, 0x00000000, 0x00003f01 },
4882                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4883                 { 0x2810, 0, 0xffff0000, 0x00000000 },
4884                 { 0x2814, 0, 0xffff0000, 0x00000000 },
4885                 { 0x2818, 0, 0xffff0000, 0x00000000 },
4886                 { 0x281c, 0, 0xffff0000, 0x00000000 },
4887                 { 0x2834, 0, 0xffffffff, 0x00000000 },
4888                 { 0x2840, 0, 0x00000000, 0xffffffff },
4889                 { 0x2844, 0, 0x00000000, 0xffffffff },
4890                 { 0x2848, 0, 0xffffffff, 0x00000000 },
4891                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4892
4893                 { 0x2c00, 0, 0x00000000, 0x00000011 },
4894                 { 0x2c04, 0, 0x00000000, 0x00030007 },
4895
4896                 { 0x3c00, 0, 0x00000000, 0x00000001 },
4897                 { 0x3c04, 0, 0x00000000, 0x00070000 },
4898                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4899                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4900                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4901                 { 0x3c14, 0, 0x00000000, 0xffffffff },
4902                 { 0x3c18, 0, 0x00000000, 0xffffffff },
4903                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4904                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4905
4906                 { 0x5004, 0, 0x00000000, 0x0000007f },
4907                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4908
4909                 { 0x5c00, 0, 0x00000000, 0x00000001 },
4910                 { 0x5c04, 0, 0x00000000, 0x0003000f },
4911                 { 0x5c08, 0, 0x00000003, 0x00000000 },
4912                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4913                 { 0x5c10, 0, 0x00000000, 0xffffffff },
4914                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4915                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4916                 { 0x5c88, 0, 0x00000000, 0x00077373 },
4917                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4918
4919                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4920                 { 0x680c, 0, 0xffffffff, 0x00000000 },
4921                 { 0x6810, 0, 0xffffffff, 0x00000000 },
4922                 { 0x6814, 0, 0xffffffff, 0x00000000 },
4923                 { 0x6818, 0, 0xffffffff, 0x00000000 },
4924                 { 0x681c, 0, 0xffffffff, 0x00000000 },
4925                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4926                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4927                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4928                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4929                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4930                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4931                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4932                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4933                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4934                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4935                 { 0x684c, 0, 0xffffffff, 0x00000000 },
4936                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4937                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4938                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4939                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4940                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4941                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4942
4943                 { 0xffff, 0, 0x00000000, 0x00000000 },
4944         };
4945
4946         ret = 0;
4947         is_5709 = 0;
4948         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4949                 is_5709 = 1;
4950
4951         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4952                 u32 offset, rw_mask, ro_mask, save_val, val;
4953                 u16 flags = reg_tbl[i].flags;
4954
4955                 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4956                         continue;
4957
4958                 offset = (u32) reg_tbl[i].offset;
4959                 rw_mask = reg_tbl[i].rw_mask;
4960                 ro_mask = reg_tbl[i].ro_mask;
4961
4962                 save_val = readl(bp->regview + offset);
4963
4964                 writel(0, bp->regview + offset);
4965
4966                 val = readl(bp->regview + offset);
4967                 if ((val & rw_mask) != 0) {
4968                         goto reg_test_err;
4969                 }
4970
4971                 if ((val & ro_mask) != (save_val & ro_mask)) {
4972                         goto reg_test_err;
4973                 }
4974
4975                 writel(0xffffffff, bp->regview + offset);
4976
4977                 val = readl(bp->regview + offset);
4978                 if ((val & rw_mask) != rw_mask) {
4979                         goto reg_test_err;
4980                 }
4981
4982                 if ((val & ro_mask) != (save_val & ro_mask)) {
4983                         goto reg_test_err;
4984                 }
4985
4986                 writel(save_val, bp->regview + offset);
4987                 continue;
4988
4989 reg_test_err:
4990                 writel(save_val, bp->regview + offset);
4991                 ret = -ENODEV;
4992                 break;
4993         }
4994         return ret;
4995 }
4996
4997 static int
4998 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4999 {
5000         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5001                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5002         int i;
5003
5004         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5005                 u32 offset;
5006
5007                 for (offset = 0; offset < size; offset += 4) {
5008
5009                         REG_WR_IND(bp, start + offset, test_pattern[i]);
5010
5011                         if (REG_RD_IND(bp, start + offset) !=
5012                                 test_pattern[i]) {
5013                                 return -ENODEV;
5014                         }
5015                 }
5016         }
5017         return 0;
5018 }
5019
5020 static int
5021 bnx2_test_memory(struct bnx2 *bp)
5022 {
5023         int ret = 0;
5024         int i;
5025         static struct mem_entry {
5026                 u32   offset;
5027                 u32   len;
5028         } mem_tbl_5706[] = {
5029                 { 0x60000,  0x4000 },
5030                 { 0xa0000,  0x3000 },
5031                 { 0xe0000,  0x4000 },
5032                 { 0x120000, 0x4000 },
5033                 { 0x1a0000, 0x4000 },
5034                 { 0x160000, 0x4000 },
5035                 { 0xffffffff, 0    },
5036         },
5037         mem_tbl_5709[] = {
5038                 { 0x60000,  0x4000 },
5039                 { 0xa0000,  0x3000 },
5040                 { 0xe0000,  0x4000 },
5041                 { 0x120000, 0x4000 },
5042                 { 0x1a0000, 0x4000 },
5043                 { 0xffffffff, 0    },
5044         };
5045         struct mem_entry *mem_tbl;
5046
5047         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5048                 mem_tbl = mem_tbl_5709;
5049         else
5050                 mem_tbl = mem_tbl_5706;
5051
5052         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5053                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5054                         mem_tbl[i].len)) != 0) {
5055                         return ret;
5056                 }
5057         }
5058
5059         return ret;
5060 }
5061
5062 #define BNX2_MAC_LOOPBACK       0
5063 #define BNX2_PHY_LOOPBACK       1
5064
/* Transmit one self-addressed frame through the selected loopback path
 * (BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK) and verify it arrives back
 * on the rx ring intact.
 *
 * Returns 0 when the frame is received without hardware error flags,
 * with the expected length, and with its payload pattern intact;
 * -EINVAL for an unknown loopback_mode; -ENOMEM if skb allocation
 * fails; -ENODEV on any verification failure.  Also returns 0 without
 * testing when a remote PHY is in control (PHY loopback is skipped).
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;

	/* With MSI-X, tx completions are tracked on a separate vector. */
	tx_napi = bnapi;
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, 8 zero bytes,
	 * then a bytewise ramp pattern that is re-checked on receive.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so rx_start_idx below is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post one tx descriptor covering the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell (producer index + byte sequence). */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the completion. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The tx side must have consumed exactly our one descriptor... */
	if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
		goto loopback_test_done;

	/* ...and the rx side must have received exactly one frame. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2 frame header sits at the start of the rx buffer, before
	 * the rx_offset applied by skb_reserve().
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the hardware flagged as damaged. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Hardware length includes the 4-byte frame CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the ramp pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5190
5191 #define BNX2_MAC_LOOPBACK_FAILED        1
5192 #define BNX2_PHY_LOOPBACK_FAILED        2
5193 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5194                                          BNX2_PHY_LOOPBACK_FAILED)
5195
5196 static int
5197 bnx2_test_loopback(struct bnx2 *bp)
5198 {
5199         int rc = 0;
5200
5201         if (!netif_running(bp->dev))
5202                 return BNX2_LOOPBACK_FAILED;
5203
5204         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5205         spin_lock_bh(&bp->phy_lock);
5206         bnx2_init_phy(bp);
5207         spin_unlock_bh(&bp->phy_lock);
5208         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5209                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5210         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5211                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5212         return rc;
5213 }
5214
5215 #define NVRAM_SIZE 0x200
5216 #define CRC32_RESIDUAL 0xdebb20e3
5217
5218 static int
5219 bnx2_test_nvram(struct bnx2 *bp)
5220 {
5221         __be32 buf[NVRAM_SIZE / 4];
5222         u8 *data = (u8 *) buf;
5223         int rc = 0;
5224         u32 magic, csum;
5225
5226         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5227                 goto test_nvram_done;
5228
5229         magic = be32_to_cpu(buf[0]);
5230         if (magic != 0x669955aa) {
5231                 rc = -ENODEV;
5232                 goto test_nvram_done;
5233         }
5234
5235         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5236                 goto test_nvram_done;
5237
5238         csum = ether_crc_le(0x100, data);
5239         if (csum != CRC32_RESIDUAL) {
5240                 rc = -ENODEV;
5241                 goto test_nvram_done;
5242         }
5243
5244         csum = ether_crc_le(0x100, data + 0x100);
5245         if (csum != CRC32_RESIDUAL) {
5246                 rc = -ENODEV;
5247         }
5248
5249 test_nvram_done:
5250         return rc;
5251 }
5252
5253 static int
5254 bnx2_test_link(struct bnx2 *bp)
5255 {
5256         u32 bmsr;
5257
5258         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5259                 if (bp->link_up)
5260                         return 0;
5261                 return -ENODEV;
5262         }
5263         spin_lock_bh(&bp->phy_lock);
5264         bnx2_enable_bmsr1(bp);
5265         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5266         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5267         bnx2_disable_bmsr1(bp);
5268         spin_unlock_bh(&bp->phy_lock);
5269
5270         if (bmsr & BMSR_LSTATUS) {
5271                 return 0;
5272         }
5273         return -ENODEV;
5274 }
5275
5276 static int
5277 bnx2_test_intr(struct bnx2 *bp)
5278 {
5279         int i;
5280         u16 status_idx;
5281
5282         if (!netif_running(bp->dev))
5283                 return -ENODEV;
5284
5285         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5286
5287         /* This register is not touched during run-time. */
5288         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5289         REG_RD(bp, BNX2_HC_COMMAND);
5290
5291         for (i = 0; i < 10; i++) {
5292                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5293                         status_idx) {
5294
5295                         break;
5296                 }
5297
5298                 msleep_interruptible(10);
5299         }
5300         if (i < 10)
5301                 return 0;
5302
5303         return -ENODEV;
5304 }
5305
/* Decide whether the 5706 SerDes parallel-detection path sees a link
 * partner.  Returns 1 when a signal is detected, the receiver is in
 * sync with valid code words, and we are not merely receiving CONFIG
 * (i.e. the partner is not autonegotiating); 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Shadow-register access: write the selector, read the value. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	/* Double read - presumably the first clears latched bits and the
	 * second returns current state; TODO(review): confirm against the
	 * PHY datasheet.  Keep both reads.
	 */
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	/* Same double-read pattern as above. */
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5333
/* Periodic link maintenance for the 5706 SerDes PHY, called from
 * bnx2_timer().  Handles: recovery from a previously forced-down link,
 * forcing 1G full-duplex via parallel detection when autoneg finds no
 * partner, returning to autoneg when a parallel-detected partner
 * starts negotiating, and forcing the link down on loss of sync.
 * Runs under phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
		/* Link was forced down on a previous tick; release it and
		 * do nothing else until the next tick.
		 */
		bnx2_5706s_force_link_dn(bp, 0);
		bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		spin_unlock(&bp->phy_lock);
		return;
	}

	if (bp->serdes_an_pending) {
		/* Autoneg was (re)started recently; give it more time. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Parallel detection: a non-negotiating partner is
			 * present, so force 1G full duplex.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detect.  Bit 0x20 of PHY reg
		 * 0x15 (selected through reg 0x17 = 0x0f01) presumably
		 * indicates the partner has started negotiating - if so,
		 * re-enable autoneg.  TODO(review): confirm register
		 * semantics against the PHY datasheet.
		 */
		check_link = 0;
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (bp->link_up && (bp->autoneg & AUTONEG_SPEED) && check_link) {
		u32 val;

		/* If the receiver has lost sync, force the link down; the
		 * FORCED_DOWN branch above will release it next tick,
		 * restarting the bring-up sequence.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (val & MISC_SHDW_AN_DBG_NOSYNC) {
			bnx2_5706s_force_link_dn(bp, 1);
			bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
		}
	}
	spin_unlock(&bp->phy_lock);
}
5399
/* Periodic link maintenance for the 5708 SerDes PHY, called from
 * bnx2_timer().  On 2.5G-capable parts with no link, alternates
 * between forced 2.5G mode and autoneg until a link comes up.
 * Runs under phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* A remote PHY manages the link itself - nothing to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Autoneg restarted recently; wait it out. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not produced a link; try forced 2.5G
			 * with a shorter re-check interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either; fall back to
			 * autoneg and skip the next two ticks while it runs.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5432
/* Driver heartbeat timer (fires every bp->current_interval jiffies).
 * Sends the firmware heartbeat, refreshes the firmware rx-drop
 * counter, applies the 5708 statistics workaround, and runs the
 * per-chip SerDes link state machine.  Re-arms itself unless the
 * interface has been brought down.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* While interrupts are held off (intr_sem != 0), skip the work
	 * but keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5463
5464 static int
5465 bnx2_request_irq(struct bnx2 *bp)
5466 {
5467         struct net_device *dev = bp->dev;
5468         unsigned long flags;
5469         struct bnx2_irq *irq;
5470         int rc = 0, i;
5471
5472         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5473                 flags = 0;
5474         else
5475                 flags = IRQF_SHARED;
5476
5477         for (i = 0; i < bp->irq_nvecs; i++) {
5478                 irq = &bp->irq_tbl[i];
5479                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5480                                  dev);
5481                 if (rc)
5482                         break;
5483                 irq->requested = 1;
5484         }
5485         return rc;
5486 }
5487
5488 static void
5489 bnx2_free_irq(struct bnx2 *bp)
5490 {
5491         struct net_device *dev = bp->dev;
5492         struct bnx2_irq *irq;
5493         int i;
5494
5495         for (i = 0; i < bp->irq_nvecs; i++) {
5496                 irq = &bp->irq_tbl[i];
5497                 if (irq->requested)
5498                         free_irq(irq->vector, dev);
5499                 irq->requested = 0;
5500         }
5501         if (bp->flags & BNX2_FLAG_USING_MSI)
5502                 pci_disable_msi(bp->pdev);
5503         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5504                 pci_disable_msix(bp->pdev);
5505
5506         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5507 }
5508
5509 static void
5510 bnx2_enable_msix(struct bnx2 *bp)
5511 {
5512         int i, rc;
5513         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5514
5515         bnx2_setup_msix_tbl(bp);
5516         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5517         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5518         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5519
5520         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5521                 msix_ent[i].entry = i;
5522                 msix_ent[i].vector = 0;
5523         }
5524
5525         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5526         if (rc != 0)
5527                 return;
5528
5529         bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5530         bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5531
5532         strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5533         strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5534         strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5535         strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5536
5537         bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5538         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5539         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5540                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5541 }
5542
5543 static void
5544 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5545 {
5546         bp->irq_tbl[0].handler = bnx2_interrupt;
5547         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5548         bp->irq_nvecs = 1;
5549         bp->irq_tbl[0].vector = bp->pdev->irq;
5550
5551         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5552                 bnx2_enable_msix(bp);
5553
5554         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5555             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5556                 if (pci_enable_msi(bp->pdev) == 0) {
5557                         bp->flags |= BNX2_FLAG_USING_MSI;
5558                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5559                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5560                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5561                         } else
5562                                 bp->irq_tbl[0].handler = bnx2_msi;
5563
5564                         bp->irq_tbl[0].vector = bp->pdev->irq;
5565                 }
5566         }
5567 }
5568
/* Called with rtnl_lock */
/* net_device ->open handler: power the chip to D0, allocate ring and
 * status memory, select the interrupt mode (MSI-X/MSI/INTx), request
 * interrupts, initialize the NIC, and start the timer and tx queue.
 * Each failure path unwinds exactly what had been acquired.  MSI is
 * additionally self-tested and abandoned in favor of INTx when it
 * fails to deliver an interrupt.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Re-select the interrupt mode with MSI forcibly
			 * disabled, then bring the chip up again.
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_start_queue(dev);

	return 0;
}
5651
/* Workqueue handler (scheduled from bnx2_tx_timeout()): quiesce the
 * interface, re-initialize the chip, and restart.  Runs in process
 * context because the re-init sleeps.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	/* Let other code paths detect that a reset is in progress. */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is set to 1 before restarting,
	 * apparently to hold off interrupt processing until the restart
	 * path re-enables it - confirm against bnx2_netif_start().
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5669
/* net_device ->tx_timeout watchdog hook.  The actual chip reset
 * sleeps, so it is deferred to the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5678
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Attach (or, with a NULL @vlgrp, detach) a VLAN group.  The interface
 * is stopped while the group pointer and rx mode are updated, then
 * restarted.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5694
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* net_device ->hard_start_xmit: map the skb (head plus page fragments)
 * onto consecutive tx descriptors, encode checksum/VLAN/TSO flags,
 * and ring the tx doorbell.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY
 * in the should-never-happen ring-full case.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];

	/* The queue is stopped before the ring can fill (see the tail of
	 * this function), so this indicates a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* VLAN tag occupies the upper 16 bits of the flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO frame: tell the hardware the mss and where the
		 * headers are.
		 */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: encode the TCP header offset (relative
			 * to a minimal ipv6 header) into the flags and mss
			 * fields, in 8-byte units split across bit ranges.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: the headers are rewritten below (zeroed
			 * IP checksum, pseudo-header TCP checksum), so they
			 * must be writable - un-clone if necessary.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Extra IP + TCP option words, when present, go in
			 * bits 8+ of the flags word.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* First descriptor carries the linear part of the skb. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One further descriptor per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the final descriptor of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when a maximally fragmented skb might not fit
	 * any more; wake immediately if completions have already freed
	 * enough room (closes a race with bnx2_tx_int()).
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5835
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        /* Quiesce interrupts, NAPI and the driver timer before touching
         * the hardware.
         */
        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        /* Pick the unload reason code for the firmware according to the
         * board's Wake-on-LAN capability and configuration.
         */
        if (bp->flags & BNX2_FLAG_NO_WOL)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        /* Release IRQs, pending skbs and descriptor memory, then power
         * the device down.
         */
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
5868
/* Read a 64-bit hardware counter split into <ctr>_hi/<ctr>_lo halves.
 * The whole expansion is parenthesized so the macro composes safely
 * inside larger expressions (the old form left a bare `+` at top level,
 * which mis-associates under operators of higher precedence).
 */
#define GET_NET_STATS64(ctr)                                    \
        (((unsigned long) ((unsigned long) (ctr##_hi) << 32) +  \
          (unsigned long) (ctr##_lo)))

/* On 32-bit hosts only the low 32 bits of the counter are used. */
#define GET_NET_STATS32(ctr)            \
        ((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
5881
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        /* If the statistics block was never set up, return the cached
         * (unchanged) copy instead of dereferencing a NULL pointer.
         */
        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        /* Packet totals are the sum of unicast, multicast and broadcast. */
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* Carrier sense errors are reported as 0 on the 5706 and 5708 A0
         * (presumably a chip erratum -- see the note above the 5706
         * stats length array below; confirm against the errata sheet).
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Drops by the chip buffer manager and by the firmware both
         * count as missed packets.
         */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
5957
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* A remote-PHY capable device can use either medium, so report
         * support for both fiber and copper.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                /* 2.5G is an optional serdes capability. */
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        /* Snapshot the live link parameters under the PHY lock so the
         * reported port/speed/duplex values are consistent.
         */
        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                /* No link: speed and duplex are unknown. */
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6018
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        /* Work on local copies so nothing is committed to bp until all
         * validation below has passed.
         */
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching the media type is only possible with a remote PHY. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 speeds are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G needs a capable PHY and a fiber port. */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        /* 1000Base-T half duplex is not supported. */
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise every speed the
                         * selected port type supports.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex (autoneg off). */
                if (cmd->port == PORT_FIBRE) {
                        /* Fiber only runs 1G or 2.5G, full duplex. */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        /* Gigabit speeds cannot be forced on copper. */
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed: commit the settings and reprogram the PHY. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6102
6103 static void
6104 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6105 {
6106         struct bnx2 *bp = netdev_priv(dev);
6107
6108         strcpy(info->driver, DRV_MODULE_NAME);
6109         strcpy(info->version, DRV_MODULE_VERSION);
6110         strcpy(info->bus_info, pci_name(bp->pdev));
6111         strcpy(info->fw_version, bp->fw_version);
6112 }
6113
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6121
6122 static void
6123 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6124 {
6125         u32 *p = _p, i, offset;
6126         u8 *orig_p = _p;
6127         struct bnx2 *bp = netdev_priv(dev);
6128         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6129                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6130                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6131                                  0x1040, 0x1048, 0x1080, 0x10a4,
6132                                  0x1400, 0x1490, 0x1498, 0x14f0,
6133                                  0x1500, 0x155c, 0x1580, 0x15dc,
6134                                  0x1600, 0x1658, 0x1680, 0x16d8,
6135                                  0x1800, 0x1820, 0x1840, 0x1854,
6136                                  0x1880, 0x1894, 0x1900, 0x1984,
6137                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6138                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6139                                  0x2000, 0x2030, 0x23c0, 0x2400,
6140                                  0x2800, 0x2820, 0x2830, 0x2850,
6141                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6142                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6143                                  0x4080, 0x4090, 0x43c0, 0x4458,
6144                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6145                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6146                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6147                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6148                                  0x6800, 0x6848, 0x684c, 0x6860,
6149                                  0x6888, 0x6910, 0x8000 };
6150
6151         regs->version = 0;
6152
6153         memset(p, 0, BNX2_REGDUMP_LEN);
6154
6155         if (!netif_running(bp->dev))
6156                 return;
6157
6158         i = 0;
6159         offset = reg_boundaries[0];
6160         p += offset;
6161         while (offset < BNX2_REGDUMP_LEN) {
6162                 *p++ = REG_RD(bp, offset);
6163                 offset += 4;
6164                 if (offset == reg_boundaries[i + 1]) {
6165                         offset = reg_boundaries[i + 2];
6166                         p = (u32 *) (orig_p + offset);
6167                         i += 2;
6168                 }
6169         }
6170 }
6171
6172 static void
6173 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6174 {
6175         struct bnx2 *bp = netdev_priv(dev);
6176
6177         if (bp->flags & BNX2_FLAG_NO_WOL) {
6178                 wol->supported = 0;
6179                 wol->wolopts = 0;
6180         }
6181         else {
6182                 wol->supported = WAKE_MAGIC;
6183                 if (bp->wol)
6184                         wol->wolopts = WAKE_MAGIC;
6185                 else
6186                         wol->wolopts = 0;
6187         }
6188         memset(&wol->sopass, 0, sizeof(wol->sopass));
6189 }
6190
6191 static int
6192 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6193 {
6194         struct bnx2 *bp = netdev_priv(dev);
6195
6196         if (wol->wolopts & ~WAKE_MAGIC)
6197                 return -EINVAL;
6198
6199         if (wol->wolopts & WAKE_MAGIC) {
6200                 if (bp->flags & BNX2_FLAG_NO_WOL)
6201                         return -EINVAL;
6202
6203                 bp->wol = 1;
6204         }
6205         else {
6206                 bp->wol = 0;
6207         }
6208         return 0;
6209 }
6210
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        /* Restarting autonegotiation only makes sense when enabled. */
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* With a remote PHY the restart is delegated to the firmware. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping so other PHY users can
                 * make progress; reacquire it afterwards.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout serviced by bp->timer. */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation cycle. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6253
6254 static int
6255 bnx2_get_eeprom_len(struct net_device *dev)
6256 {
6257         struct bnx2 *bp = netdev_priv(dev);
6258
6259         if (bp->flash_info == NULL)
6260                 return 0;
6261
6262         return (int) bp->flash_size;
6263 }
6264
6265 static int
6266 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6267                 u8 *eebuf)
6268 {
6269         struct bnx2 *bp = netdev_priv(dev);
6270         int rc;
6271
6272         /* parameters already validated in ethtool_get_eeprom */
6273
6274         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6275
6276         return rc;
6277 }
6278
6279 static int
6280 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6281                 u8 *eebuf)
6282 {
6283         struct bnx2 *bp = netdev_priv(dev);
6284         int rc;
6285
6286         /* parameters already validated in ethtool_set_eeprom */
6287
6288         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6289
6290         return rc;
6291 }
6292
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Zero the whole structure first; only the fields this driver
         * supports are filled in below.
         */
        memset(coal, 0, sizeof(struct ethtool_coalesce));

        /* Receive-side coalescing parameters. */
        coal->rx_coalesce_usecs = bp->rx_ticks;
        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

        /* Transmit-side coalescing parameters. */
        coal->tx_coalesce_usecs = bp->tx_ticks;
        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

        /* Statistics block update interval. */
        coal->stats_block_coalesce_usecs = bp->stats_ticks;

        return 0;
}
6314
6315 static int
6316 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6317 {
6318         struct bnx2 *bp = netdev_priv(dev);
6319
6320         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6321         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6322
6323         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6324         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6325
6326         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6327         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6328
6329         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6330         if (bp->rx_quick_cons_trip_int > 0xff)
6331                 bp->rx_quick_cons_trip_int = 0xff;
6332
6333         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6334         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6335
6336         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6337         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6338
6339         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6340         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6341
6342         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6343         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6344                 0xff;
6345
6346         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6347         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6348                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6349                         bp->stats_ticks = USEC_PER_SEC;
6350         }
6351         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6352                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6353         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6354
6355         if (netif_running(bp->dev)) {
6356                 bnx2_netif_stop(bp);
6357                 bnx2_init_nic(bp);
6358                 bnx2_netif_start(bp);
6359         }
6360
6361         return 0;
6362 }
6363
6364 static void
6365 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6366 {
6367         struct bnx2 *bp = netdev_priv(dev);
6368
6369         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6370         ering->rx_mini_max_pending = 0;
6371         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6372
6373         ering->rx_pending = bp->rx_ring_size;
6374         ering->rx_mini_pending = 0;
6375         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6376
6377         ering->tx_max_pending = MAX_TX_DESC_CNT;
6378         ering->tx_pending = bp->tx_ring_size;
6379 }
6380
/* Apply new rx/tx ring sizes.  If the interface is up, the chip is
 * reset and all buffers and descriptor memory are freed before the
 * sizes are changed, then everything is reallocated and restarted.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                if (rc)
                        /* NOTE(review): on allocation failure the device
                         * is left stopped with no rings allocated.
                         */
                        return rc;
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }
        return 0;
}
6405
6406 static int
6407 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6408 {
6409         struct bnx2 *bp = netdev_priv(dev);
6410         int rc;
6411
6412         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6413                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6414                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6415
6416                 return -EINVAL;
6417         }
6418         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6419         return rc;
6420 }
6421
6422 static void
6423 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6424 {
6425         struct bnx2 *bp = netdev_priv(dev);
6426
6427         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6428         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6429         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6430 }
6431
6432 static int
6433 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6434 {
6435         struct bnx2 *bp = netdev_priv(dev);
6436
6437         bp->req_flow_ctrl = 0;
6438         if (epause->rx_pause)
6439                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6440         if (epause->tx_pause)
6441                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6442
6443         if (epause->autoneg) {
6444                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6445         }
6446         else {
6447                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6448         }
6449
6450         spin_lock_bh(&bp->phy_lock);
6451
6452         bnx2_setup_phy(bp, bp->phy_port);
6453
6454         spin_unlock_bh(&bp->phy_lock);
6455
6456         return 0;
6457 }
6458
6459 static u32
6460 bnx2_get_rx_csum(struct net_device *dev)
6461 {
6462         struct bnx2 *bp = netdev_priv(dev);
6463
6464         return bp->rx_csum;
6465 }
6466
6467 static int
6468 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6469 {
6470         struct bnx2 *bp = netdev_priv(dev);
6471
6472         bp->rx_csum = data;
6473         return 0;
6474 }
6475
6476 static int
6477 bnx2_set_tso(struct net_device *dev, u32 data)
6478 {
6479         struct bnx2 *bp = netdev_priv(dev);
6480
6481         if (data) {
6482                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6483                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6484                         dev->features |= NETIF_F_TSO6;
6485         } else
6486                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6487                                    NETIF_F_TSO_ECN);
6488         return 0;
6489 }
6490
/* Number of ethtool statistics; must match the three parallel arrays
 * below (names, offsets, per-chip lengths).
 */
#define BNX2_NUM_STATS 46

/* Statistic names reported via ethtool; kept in the same order as
 * bnx2_stats_offset_arr below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
6543
/* Convert a statistics_block field name to its 32-bit word offset. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter within the statistics block, parallel to
 * bnx2_stats_str_arr; 64-bit counters point at their _hi word.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6594
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes for the 5706: 8 = 64-bit counter,
 * 4 = 32-bit counter, 0 = counter skipped on this chip.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};

/* Same table for the 5708, which does report carrier sense errors
 * (entry 11 is 4 here instead of 0).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6613
/* Number of self-tests; matches the result slots filled by
 * bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; "offline" tests disrupt normal traffic. */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6626
6627 static int
6628 bnx2_get_sset_count(struct net_device *dev, int sset)
6629 {
6630         switch (sset) {
6631         case ETH_SS_TEST:
6632                 return BNX2_NUM_TESTS;
6633         case ETH_SS_STATS:
6634                 return BNX2_NUM_STATS;
6635         default:
6636                 return -EOPNOTSUPP;
6637         }
6638 }
6639
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* buf[] entries correspond 1:1 to bnx2_tests_str_arr; a
         * non-zero entry marks that test as failed.
         */
        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Offline tests need exclusive use of the hardware, so
                 * stop traffic and put the chip in diagnostic mode.
                 */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Restore normal operation, or just reset the chip if
                 * the interface is not up.
                 */
                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp);
                        bnx2_netif_start(bp);
                }

                /* wait for link up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        /* Online tests may run while traffic is flowing. */
        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
6695
6696 static void
6697 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6698 {
6699         switch (stringset) {
6700         case ETH_SS_STATS:
6701                 memcpy(buf, bnx2_stats_str_arr,
6702                         sizeof(bnx2_stats_str_arr));
6703                 break;
6704         case ETH_SS_TEST:
6705                 memcpy(buf, bnx2_tests_str_arr,
6706                         sizeof(bnx2_tests_str_arr));
6707                 break;
6708         }
6709 }
6710
6711 static void
6712 bnx2_get_ethtool_stats(struct net_device *dev,
6713                 struct ethtool_stats *stats, u64 *buf)
6714 {
6715         struct bnx2 *bp = netdev_priv(dev);
6716         int i;
6717         u32 *hw_stats = (u32 *) bp->stats_blk;
6718         u8 *stats_len_arr = NULL;
6719
6720         if (hw_stats == NULL) {
6721                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6722                 return;
6723         }
6724
6725         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6726             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6727             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6728             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6729                 stats_len_arr = bnx2_5706_stats_len_arr;
6730         else
6731                 stats_len_arr = bnx2_5708_stats_len_arr;
6732
6733         for (i = 0; i < BNX2_NUM_STATS; i++) {
6734                 if (stats_len_arr[i] == 0) {
6735                         /* skip this counter */
6736                         buf[i] = 0;
6737                         continue;
6738                 }
6739                 if (stats_len_arr[i] == 4) {
6740                         /* 4-byte counter */
6741                         buf[i] = (u64)
6742                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6743                         continue;
6744                 }
6745                 /* 8-byte counter */
6746                 buf[i] = (((u64) *(hw_stats +
6747                                         bnx2_stats_offset_arr[i])) << 32) +
6748                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6749         }
6750 }
6751
6752 static int
6753 bnx2_phys_id(struct net_device *dev, u32 data)
6754 {
6755         struct bnx2 *bp = netdev_priv(dev);
6756         int i;
6757         u32 save;
6758
6759         if (data == 0)
6760                 data = 2;
6761
6762         save = REG_RD(bp, BNX2_MISC_CFG);
6763         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6764
6765         for (i = 0; i < (data * 2); i++) {
6766                 if ((i % 2) == 0) {
6767                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6768                 }
6769                 else {
6770                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6771                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6772                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6773                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6774                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6775                                 BNX2_EMAC_LED_TRAFFIC);
6776                 }
6777                 msleep_interruptible(500);
6778                 if (signal_pending(current))
6779                         break;
6780         }
6781         REG_WR(bp, BNX2_EMAC_LED, 0);
6782         REG_WR(bp, BNX2_MISC_CFG, save);
6783         return 0;
6784 }
6785
6786 static int
6787 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6788 {
6789         struct bnx2 *bp = netdev_priv(dev);
6790
6791         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6792                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6793         else
6794                 return (ethtool_op_set_tx_csum(dev, data));
6795 }
6796
/* ethtool operations exported by this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .set_tx_csum            = bnx2_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
};
6827
6828 /* Called with rtnl_lock */
6829 static int
6830 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6831 {
6832         struct mii_ioctl_data *data = if_mii(ifr);
6833         struct bnx2 *bp = netdev_priv(dev);
6834         int err;
6835
6836         switch(cmd) {
6837         case SIOCGMIIPHY:
6838                 data->phy_id = bp->phy_addr;
6839
6840                 /* fallthru */
6841         case SIOCGMIIREG: {
6842                 u32 mii_regval;
6843
6844                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6845                         return -EOPNOTSUPP;
6846
6847                 if (!netif_running(dev))
6848                         return -EAGAIN;
6849
6850                 spin_lock_bh(&bp->phy_lock);
6851                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6852                 spin_unlock_bh(&bp->phy_lock);
6853
6854                 data->val_out = mii_regval;
6855
6856                 return err;
6857         }
6858
6859         case SIOCSMIIREG:
6860                 if (!capable(CAP_NET_ADMIN))
6861                         return -EPERM;
6862
6863                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6864                         return -EOPNOTSUPP;
6865
6866                 if (!netif_running(dev))
6867                         return -EAGAIN;
6868
6869                 spin_lock_bh(&bp->phy_lock);
6870                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6871                 spin_unlock_bh(&bp->phy_lock);
6872
6873                 return err;
6874
6875         default:
6876                 /* do nothing */
6877                 break;
6878         }
6879         return -EOPNOTSUPP;
6880 }
6881
6882 /* Called with rtnl_lock */
6883 static int
6884 bnx2_change_mac_addr(struct net_device *dev, void *p)
6885 {
6886         struct sockaddr *addr = p;
6887         struct bnx2 *bp = netdev_priv(dev);
6888
6889         if (!is_valid_ether_addr(addr->sa_data))
6890                 return -EINVAL;
6891
6892         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6893         if (netif_running(dev))
6894                 bnx2_set_mac_addr(bp);
6895
6896         return 0;
6897 }
6898
6899 /* Called with rtnl_lock */
6900 static int
6901 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6902 {
6903         struct bnx2 *bp = netdev_priv(dev);
6904
6905         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6906                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6907                 return -EINVAL;
6908
6909         dev->mtu = new_mtu;
6910         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6911 }
6912
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: invoke the interrupt handler directly with the IRQ
 * line masked so the real handler cannot run concurrently.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        disable_irq(bp->pdev->irq);
        bnx2_interrupt(bp->pdev->irq, dev);
        enable_irq(bp->pdev->irq);
}
#endif
6924
6925 static void __devinit
6926 bnx2_get_5709_media(struct bnx2 *bp)
6927 {
6928         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6929         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6930         u32 strap;
6931
6932         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6933                 return;
6934         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6935                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6936                 return;
6937         }
6938
6939         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6940                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6941         else
6942                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6943
6944         if (PCI_FUNC(bp->pdev->devfn) == 0) {
6945                 switch (strap) {
6946                 case 0x4:
6947                 case 0x5:
6948                 case 0x6:
6949                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6950                         return;
6951                 }
6952         } else {
6953                 switch (strap) {
6954                 case 0x1:
6955                 case 0x2:
6956                 case 0x4:
6957                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6958                         return;
6959                 }
6960         }
6961 }
6962
6963 static void __devinit
6964 bnx2_get_pci_speed(struct bnx2 *bp)
6965 {
6966         u32 reg;
6967
6968         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6969         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6970                 u32 clkreg;
6971
6972                 bp->flags |= BNX2_FLAG_PCIX;
6973
6974                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6975
6976                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6977                 switch (clkreg) {
6978                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6979                         bp->bus_speed_mhz = 133;
6980                         break;
6981
6982                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6983                         bp->bus_speed_mhz = 100;
6984                         break;
6985
6986                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6987                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6988                         bp->bus_speed_mhz = 66;
6989                         break;
6990
6991                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6992                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6993                         bp->bus_speed_mhz = 50;
6994                         break;
6995
6996                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6997                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6998                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6999                         bp->bus_speed_mhz = 33;
7000                         break;
7001                 }
7002         }
7003         else {
7004                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7005                         bp->bus_speed_mhz = 66;
7006                 else
7007                         bp->bus_speed_mhz = 33;
7008         }
7009
7010         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7011                 bp->flags |= BNX2_FLAG_PCI_32BIT;
7012
7013 }
7014
7015 static int __devinit
7016 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7017 {
7018         struct bnx2 *bp;
7019         unsigned long mem_len;
7020         int rc, i, j;
7021         u32 reg;
7022         u64 dma_mask, persist_dma_mask;
7023
7024         SET_NETDEV_DEV(dev, &pdev->dev);
7025         bp = netdev_priv(dev);
7026
7027         bp->flags = 0;
7028         bp->phy_flags = 0;
7029
7030         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7031         rc = pci_enable_device(pdev);
7032         if (rc) {
7033                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7034                 goto err_out;
7035         }
7036
7037         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7038                 dev_err(&pdev->dev,
7039                         "Cannot find PCI device base address, aborting.\n");
7040                 rc = -ENODEV;
7041                 goto err_out_disable;
7042         }
7043
7044         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7045         if (rc) {
7046                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7047                 goto err_out_disable;
7048         }
7049
7050         pci_set_master(pdev);
7051
7052         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7053         if (bp->pm_cap == 0) {
7054                 dev_err(&pdev->dev,
7055                         "Cannot find power management capability, aborting.\n");
7056                 rc = -EIO;
7057                 goto err_out_release;
7058         }
7059
7060         bp->dev = dev;
7061         bp->pdev = pdev;
7062
7063         spin_lock_init(&bp->phy_lock);
7064         spin_lock_init(&bp->indirect_lock);
7065         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7066
7067         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7068         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7069         dev->mem_end = dev->mem_start + mem_len;
7070         dev->irq = pdev->irq;
7071
7072         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7073
7074         if (!bp->regview) {
7075                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7076                 rc = -ENOMEM;
7077                 goto err_out_release;
7078         }
7079
7080         /* Configure byte swap and enable write to the reg_window registers.
7081          * Rely on CPU to do target byte swapping on big endian systems
7082          * The chip's target access swapping will not swap all accesses
7083          */
7084         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7085                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7086                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7087
7088         bnx2_set_power_state(bp, PCI_D0);
7089
7090         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7091
7092         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7093                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7094                         dev_err(&pdev->dev,
7095                                 "Cannot find PCIE capability, aborting.\n");
7096                         rc = -EIO;
7097                         goto err_out_unmap;
7098                 }
7099                 bp->flags |= BNX2_FLAG_PCIE;
7100                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7101                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7102         } else {
7103                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7104                 if (bp->pcix_cap == 0) {
7105                         dev_err(&pdev->dev,
7106                                 "Cannot find PCIX capability, aborting.\n");
7107                         rc = -EIO;
7108                         goto err_out_unmap;
7109                 }
7110         }
7111
7112         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7113                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7114                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7115         }
7116
7117         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7118                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7119                         bp->flags |= BNX2_FLAG_MSI_CAP;
7120         }
7121
7122         /* 5708 cannot support DMA addresses > 40-bit.  */
7123         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7124                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7125         else
7126                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7127
7128         /* Configure DMA attributes. */
7129         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7130                 dev->features |= NETIF_F_HIGHDMA;
7131                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7132                 if (rc) {
7133                         dev_err(&pdev->dev,
7134                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7135                         goto err_out_unmap;
7136                 }
7137         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7138                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7139                 goto err_out_unmap;
7140         }
7141
7142         if (!(bp->flags & BNX2_FLAG_PCIE))
7143                 bnx2_get_pci_speed(bp);
7144
7145         /* 5706A0 may falsely detect SERR and PERR. */
7146         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7147                 reg = REG_RD(bp, PCI_COMMAND);
7148                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7149                 REG_WR(bp, PCI_COMMAND, reg);
7150         }
7151         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7152                 !(bp->flags & BNX2_FLAG_PCIX)) {
7153
7154                 dev_err(&pdev->dev,
7155                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7156                 goto err_out_unmap;
7157         }
7158
7159         bnx2_init_nvram(bp);
7160
7161         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
7162
7163         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7164             BNX2_SHM_HDR_SIGNATURE_SIG) {
7165                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7166
7167                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
7168         } else
7169                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7170
7171         /* Get the permanent MAC address.  First we need to make sure the
7172          * firmware is actually running.
7173          */
7174         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
7175
7176         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7177             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7178                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7179                 rc = -ENODEV;
7180                 goto err_out_unmap;
7181         }
7182
7183         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
7184         for (i = 0, j = 0; i < 3; i++) {
7185                 u8 num, k, skip0;
7186
7187                 num = (u8) (reg >> (24 - (i * 8)));
7188                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7189                         if (num >= k || !skip0 || k == 1) {
7190                                 bp->fw_version[j++] = (num / k) + '0';
7191                                 skip0 = 0;
7192                         }
7193                 }
7194                 if (i != 2)
7195                         bp->fw_version[j++] = '.';
7196         }
7197         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
7198         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7199                 bp->wol = 1;
7200
7201         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7202                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7203
7204                 for (i = 0; i < 30; i++) {
7205                         reg = REG_RD_IND(bp, bp->shmem_base +
7206                                              BNX2_BC_STATE_CONDITION);
7207                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7208                                 break;
7209                         msleep(10);
7210                 }
7211         }
7212         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
7213         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7214         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7215             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7216                 int i;
7217                 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
7218
7219                 bp->fw_version[j++] = ' ';
7220                 for (i = 0; i < 3; i++) {
7221                         reg = REG_RD_IND(bp, addr + i * 4);
7222                         reg = swab32(reg);
7223                         memcpy(&bp->fw_version[j], &reg, 4);
7224                         j += 4;
7225                 }
7226         }
7227
7228         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
7229         bp->mac_addr[0] = (u8) (reg >> 8);
7230         bp->mac_addr[1] = (u8) reg;
7231
7232         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
7233         bp->mac_addr[2] = (u8) (reg >> 24);
7234         bp->mac_addr[3] = (u8) (reg >> 16);
7235         bp->mac_addr[4] = (u8) (reg >> 8);
7236         bp->mac_addr[5] = (u8) reg;
7237
7238         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7239
7240         bp->tx_ring_size = MAX_TX_DESC_CNT;
7241         bnx2_set_rx_ring_size(bp, 255);
7242
7243         bp->rx_csum = 1;
7244
7245         bp->tx_quick_cons_trip_int = 20;
7246         bp->tx_quick_cons_trip = 20;
7247         bp->tx_ticks_int = 80;
7248         bp->tx_ticks = 80;
7249
7250         bp->rx_quick_cons_trip_int = 6;
7251         bp->rx_quick_cons_trip = 6;
7252         bp->rx_ticks_int = 18;
7253         bp->rx_ticks = 18;
7254
7255         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7256
7257         bp->timer_interval =  HZ;
7258         bp->current_interval =  HZ;
7259
7260         bp->phy_addr = 1;
7261
7262         /* Disable WOL support if we are running on a SERDES chip. */
7263         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7264                 bnx2_get_5709_media(bp);
7265         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7266                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7267
7268         bp->phy_port = PORT_TP;
7269         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7270                 bp->phy_port = PORT_FIBRE;
7271                 reg = REG_RD_IND(bp, bp->shmem_base +
7272                                      BNX2_SHARED_HW_CFG_CONFIG);
7273                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7274                         bp->flags |= BNX2_FLAG_NO_WOL;
7275                         bp->wol = 0;
7276                 }
7277                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7278                         bp->phy_addr = 2;
7279                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7280                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7281                 }
7282                 bnx2_init_remote_phy(bp);
7283
7284         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7285                    CHIP_NUM(bp) == CHIP_NUM_5708)
7286                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7287         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7288                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7289                   CHIP_REV(bp) == CHIP_REV_Bx))
7290                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7291
7292         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7293             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7294             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7295                 bp->flags |= BNX2_FLAG_NO_WOL;
7296                 bp->wol = 0;
7297         }
7298
7299         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7300                 bp->tx_quick_cons_trip_int =
7301                         bp->tx_quick_cons_trip;
7302                 bp->tx_ticks_int = bp->tx_ticks;
7303                 bp->rx_quick_cons_trip_int =
7304                         bp->rx_quick_cons_trip;
7305                 bp->rx_ticks_int = bp->rx_ticks;
7306                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7307                 bp->com_ticks_int = bp->com_ticks;
7308                 bp->cmd_ticks_int = bp->cmd_ticks;
7309         }
7310
7311         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7312          *
7313          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7314          * with byte enables disabled on the unused 32-bit word.  This is legal
7315          * but causes problems on the AMD 8132 which will eventually stop
7316          * responding after a while.
7317          *
7318          * AMD believes this incompatibility is unique to the 5706, and
7319          * prefers to locally disable MSI rather than globally disabling it.
7320          */
7321         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7322                 struct pci_dev *amd_8132 = NULL;
7323
7324                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7325                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7326                                                   amd_8132))) {
7327
7328                         if (amd_8132->revision >= 0x10 &&
7329                             amd_8132->revision <= 0x13) {
7330                                 disable_msi = 1;
7331                                 pci_dev_put(amd_8132);
7332                                 break;
7333                         }
7334                 }
7335         }
7336
7337         bnx2_set_default_link(bp);
7338         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7339
7340         init_timer(&bp->timer);
7341         bp->timer.expires = RUN_AT(bp->timer_interval);
7342         bp->timer.data = (unsigned long) bp;
7343         bp->timer.function = bnx2_timer;
7344
7345         return 0;
7346
7347 err_out_unmap:
7348         if (bp->regview) {
7349                 iounmap(bp->regview);
7350                 bp->regview = NULL;
7351         }
7352
7353 err_out_release:
7354         pci_release_regions(pdev);
7355
7356 err_out_disable:
7357         pci_disable_device(pdev);
7358         pci_set_drvdata(pdev, NULL);
7359
7360 err_out:
7361         return rc;
7362 }
7363
7364 static char * __devinit
7365 bnx2_bus_string(struct bnx2 *bp, char *str)
7366 {
7367         char *s = str;
7368
7369         if (bp->flags & BNX2_FLAG_PCIE) {
7370                 s += sprintf(s, "PCI Express");
7371         } else {
7372                 s += sprintf(s, "PCI");
7373                 if (bp->flags & BNX2_FLAG_PCIX)
7374                         s += sprintf(s, "-X");
7375                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7376                         s += sprintf(s, " 32-bit");
7377                 else
7378                         s += sprintf(s, " 64-bit");
7379                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7380         }
7381         return str;
7382 }
7383
/* Set up the per-vector NAPI contexts: give each one a back-pointer to
 * the device private struct, then register bnx2_poll on vector 0 and
 * bnx2_tx_poll on the TX vector, both with weight 64.
 */
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                bnapi = &bp->bnx2_napi[i];
                bnapi->bp = bp;
        }
        netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
        netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
                       64);
}
7398
/* PCI probe entry point: allocate the net_device, initialize the board
 * via bnx2_init_board(), wire up the net_device callbacks and feature
 * flags, and register the interface with the networking core.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int version_printed = 0;
        struct net_device *dev = NULL;
        struct bnx2 *bp;
        int rc;
        char str[40];
        DECLARE_MAC_BUF(mac);

        /* Print the driver banner only once, on the first probe. */
        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev(sizeof(*bp));

        if (!dev)
                return -ENOMEM;

        rc = bnx2_init_board(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        dev->open = bnx2_open;
        dev->hard_start_xmit = bnx2_start_xmit;
        dev->stop = bnx2_close;
        dev->get_stats = bnx2_get_stats;
        dev->set_multicast_list = bnx2_set_rx_mode;
        dev->do_ioctl = bnx2_ioctl;
        dev->set_mac_address = bnx2_change_mac_addr;
        dev->change_mtu = bnx2_change_mtu;
        dev->tx_timeout = bnx2_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
        dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
        dev->ethtool_ops = &bnx2_ethtool_ops;

        bp = netdev_priv(dev);
        bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
        dev->poll_controller = poll_bnx2;
#endif

        pci_set_drvdata(pdev, dev);

        /* bp->mac_addr was read from shared memory by bnx2_init_board. */
        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);
        bp->name = board_info[ent->driver_data].name;

        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
        dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                dev->features |= NETIF_F_TSO6;

        if ((rc = register_netdev(dev))) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                /* Undo everything bnx2_init_board() set up. */
                if (bp->regview)
                        iounmap(bp->regview);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
                free_netdev(dev);
                return rc;
        }

        printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
                "IRQ %d, node addr %s\n",
                dev->name,
                bp->name,
                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                ((CHIP_ID(bp) & 0x0ff0) >> 4),
                bnx2_bus_string(bp, str),
                dev->base_addr,
                bp->pdev->irq, print_mac(mac, dev->dev_addr));

        return 0;
}
7486
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure no deferred work (e.g. bp->reset_task scheduled via
         * the shared workqueue) is still running or pending before the
         * netdev is torn down underneath it.
         */
        flush_scheduled_work();

        unregister_netdev(dev);

        /* Unmap the BAR only if probe actually mapped it. */
        if (bp->regview)
                iounmap(bp->regview);

        /* bp lives inside dev's private area, so free_netdev() must come
         * after the last access to bp above.
         */
        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
7505
/* PM suspend hook: quiesce the NIC, tell the firmware why we are going
 * down (link-down / WoL / no-WoL), free DMA buffers, and drop the chip
 * into the target PCI power state.  Always returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);
        if (!netif_running(dev))
                return 0;

        /* Stop deferred work and NAPI/TX before resetting the chip. */
        flush_scheduled_work();
        bnx2_netif_stop(bp);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        /* Pick the firmware unload/reset reason: NO_WOL flag wins over the
         * user's wol setting.  NOTE(review): assumes firmware interprets
         * these codes to arm/disarm wake-up -- confirm against bnx2.h docs.
         */
        if (bp->flags & BNX2_FLAG_NO_WOL)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_skbs(bp);
        bnx2_set_power_state(bp, pci_choose_state(pdev, state));
        return 0;
}
7536
/* PM resume hook: restore PCI config space, bring the chip back to D0,
 * re-initialize the NIC, and restart the data path.  Mirrors
 * bnx2_suspend(); a device that was not running stays down.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        pci_restore_state(pdev);
        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        /* NOTE(review): the result of bnx2_init_nic() is ignored here; if
         * chip re-init can fail, the device would be left attached but
         * non-functional -- confirm whether an error should be propagated.
         */
        bnx2_init_nic(bp);
        bnx2_netif_start(bp);
        return 0;
}
7553
7554 static struct pci_driver bnx2_pci_driver = {
7555         .name           = DRV_MODULE_NAME,
7556         .id_table       = bnx2_pci_tbl,
7557         .probe          = bnx2_init_one,
7558         .remove         = __devexit_p(bnx2_remove_one),
7559         .suspend        = bnx2_suspend,
7560         .resume         = bnx2_resume,
7561 };
7562
7563 static int __init bnx2_init(void)
7564 {
7565         return pci_register_driver(&bnx2_pci_driver);
7566 }
7567
/* Module exit point: unregistering the driver triggers
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
7572
/* Register module load/unload handlers with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7575
7576
7577