/* drivers/net/bnx2.c — Broadcom NetXtreme II (bnx2) Gigabit Ethernet driver */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50 #include <linux/log2.h>
51
52 #include "bnx2.h"
53 #include "bnx2_fw.h"
54 #include "bnx2_fw2.h"
55
/* NOTE(review): appears to be the scratch-buffer size used when loading
 * the zlib-compressed firmware images — confirm against bnx2_fw.h users. */
#define FW_BUF_SIZE             0x10000

/* Driver identification strings used in log messages and ethtool. */
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.7.9"
#define DRV_MODULE_RELDATE      "July 18, 2008"

/* Absolute jiffies value x ticks from now (for timer scheduling). */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
67
/* Banner printed once at probe time. */
static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Board identifiers; stored in the PCI table's driver_data field and
 * used as an index into board_info[] below, so the two must stay in
 * the same order. */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
} board_t;
93
/* indexed by board_t, above */
static struct {
        char *name;             /* marketing name printed at probe time */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        };
109
/* PCI device ID table.  The HP OEM entries (specific subsystem IDs)
 * are listed before the generic PCI_ANY_ID entries for the same
 * device so they match first.  driver_data is a board_t index into
 * board_info[]. */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 0x163b: BCM5716 — no PCI_DEVICE_ID_NX2_* constant yet */
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { 0, }
};
133
/* NVRAM device table.  NOTE(review): field meanings are taken from the
 * struct flash_spec layout in bnx2.h — the five leading words appear to
 * be raw strapping/configuration register values for the NVRAM
 * controller, followed by access flags, page geometry, byte-address
 * mask, total size, and a human-readable name; confirm against bnx2.h
 * before relying on this description. */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
222
/* NOTE(review): the 5709 presumably has a single fixed NVRAM type and
 * uses this entry instead of the strap-probed flash_table above —
 * confirm in the NVRAM init path. */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
233
/* Return the number of free TX descriptors in @txr.
 *
 * The smp_mb() orders the reads of tx_prod/tx_cons below against prior
 * ring updates (presumably pairing with a barrier in the TX completion
 * path — not visible in this file chunk; confirm there).
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
        u32 diff;

        smp_mb();

        /* The ring uses 256 indices for 255 entries, one of them
         * needs to be skipped.
         */
        diff = txr->tx_prod - txr->tx_cons;
        if (unlikely(diff >= TX_DESC_CNT)) {
                /* Indices are 16-bit based; mask off the wrap, and a
                 * residual of exactly TX_DESC_CNT means the ring holds
                 * its maximum of MAX_TX_DESC_CNT entries. */
                diff &= 0xffff;
                if (diff == TX_DESC_CNT)
                        diff = MAX_TX_DESC_CNT;
        }
        return (bp->tx_ring_size - diff);
}
251
252 static u32
253 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
254 {
255         u32 val;
256
257         spin_lock_bh(&bp->indirect_lock);
258         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
259         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
260         spin_unlock_bh(&bp->indirect_lock);
261         return val;
262 }
263
264 static void
265 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
266 {
267         spin_lock_bh(&bp->indirect_lock);
268         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
269         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
270         spin_unlock_bh(&bp->indirect_lock);
271 }
272
273 static void
274 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
275 {
276         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
277 }
278
279 static u32
280 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
281 {
282         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
283 }
284
/* Write @val to context memory at @cid_addr + @offset.
 *
 * On the 5709 the context is reached through the CTX_CTX_* request
 * interface and the write is polled for completion (up to 5 passes of
 * 5us each); older chips use a simple address/data register pair.
 * indirect_lock serializes this against other indirect accesses.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* Hardware clears WRITE_REQ when the write has landed;
                 * a timeout here is silently ignored. */
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
308
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 *
 * If the MDIO block is auto-polling the PHY, polling is switched off
 * around the transaction and restored afterwards (the REG_RD after
 * each mode write flushes the posted write; the 40us settle delay is
 * presumably a hardware requirement — not documented here).
 *
 * Returns 0 on success, or -EBUSY if the transaction did not complete
 * within 50 * 10us, in which case *val is set to 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* PHY address in bits 25:21, register in 20:16, plus the
         * read command; START_BUSY kicks off the transaction. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
365
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * transaction and the command is polled for completion for up to
 * 50 * 10us.
 *
 * Returns 0 on success, -EBUSY on transaction timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Data occupies the low bits; address/register fields and the
         * write command are OR'ed in above it. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
414
415 static void
416 bnx2_disable_int(struct bnx2 *bp)
417 {
418         int i;
419         struct bnx2_napi *bnapi;
420
421         for (i = 0; i < bp->irq_nvecs; i++) {
422                 bnapi = &bp->bnx2_napi[i];
423                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
424                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
425         }
426         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
427 }
428
/* Unmask interrupts on all vectors.
 *
 * Two writes per vector: the first acknowledges up to
 * last_status_idx while keeping the vector masked, the second repeats
 * the ack with the mask bit cleared.  The final HC_COMMAND write with
 * COAL_NOW presumably forces an immediate coalescing pass so any
 * pending event raises an interrupt right away — confirm against the
 * BNX2_HC_COMMAND register documentation.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
449
450 static void
451 bnx2_disable_int_sync(struct bnx2 *bp)
452 {
453         int i;
454
455         atomic_inc(&bp->intr_sem);
456         bnx2_disable_int(bp);
457         for (i = 0; i < bp->irq_nvecs; i++)
458                 synchronize_irq(bp->irq_tbl[i].vector);
459 }
460
461 static void
462 bnx2_napi_disable(struct bnx2 *bp)
463 {
464         int i;
465
466         for (i = 0; i < bp->irq_nvecs; i++)
467                 napi_disable(&bp->bnx2_napi[i].napi);
468 }
469
470 static void
471 bnx2_napi_enable(struct bnx2 *bp)
472 {
473         int i;
474
475         for (i = 0; i < bp->irq_nvecs; i++)
476                 napi_enable(&bp->bnx2_napi[i].napi);
477 }
478
479 static void
480 bnx2_netif_stop(struct bnx2 *bp)
481 {
482         bnx2_disable_int_sync(bp);
483         if (netif_running(bp->dev)) {
484                 bnx2_napi_disable(bp);
485                 netif_tx_disable(bp->dev);
486                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
487         }
488 }
489
490 static void
491 bnx2_netif_start(struct bnx2 *bp)
492 {
493         if (atomic_dec_and_test(&bp->intr_sem)) {
494                 if (netif_running(bp->dev)) {
495                         netif_tx_wake_all_queues(bp->dev);
496                         bnx2_napi_enable(bp);
497                         bnx2_enable_int(bp);
498                 }
499         }
500 }
501
502 static void
503 bnx2_free_tx_mem(struct bnx2 *bp)
504 {
505         int i;
506
507         for (i = 0; i < bp->num_tx_rings; i++) {
508                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
509                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
510
511                 if (txr->tx_desc_ring) {
512                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
513                                             txr->tx_desc_ring,
514                                             txr->tx_desc_mapping);
515                         txr->tx_desc_ring = NULL;
516                 }
517                 kfree(txr->tx_buf_ring);
518                 txr->tx_buf_ring = NULL;
519         }
520 }
521
522 static void
523 bnx2_free_rx_mem(struct bnx2 *bp)
524 {
525         int i;
526
527         for (i = 0; i < bp->num_rx_rings; i++) {
528                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
529                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
530                 int j;
531
532                 for (j = 0; j < bp->rx_max_ring; j++) {
533                         if (rxr->rx_desc_ring[j])
534                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
535                                                     rxr->rx_desc_ring[j],
536                                                     rxr->rx_desc_mapping[j]);
537                         rxr->rx_desc_ring[j] = NULL;
538                 }
539                 if (rxr->rx_buf_ring)
540                         vfree(rxr->rx_buf_ring);
541                 rxr->rx_buf_ring = NULL;
542
543                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
544                         if (rxr->rx_pg_desc_ring[j])
545                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
546                                                     rxr->rx_pg_desc_ring[i],
547                                                     rxr->rx_pg_desc_mapping[i]);
548                         rxr->rx_pg_desc_ring[i] = NULL;
549                 }
550                 if (rxr->rx_pg_ring)
551                         vfree(rxr->rx_pg_ring);
552                 rxr->rx_pg_ring = NULL;
553         }
554 }
555
556 static int
557 bnx2_alloc_tx_mem(struct bnx2 *bp)
558 {
559         int i;
560
561         for (i = 0; i < bp->num_tx_rings; i++) {
562                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
563                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
564
565                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
566                 if (txr->tx_buf_ring == NULL)
567                         return -ENOMEM;
568
569                 txr->tx_desc_ring =
570                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
571                                              &txr->tx_desc_mapping);
572                 if (txr->tx_desc_ring == NULL)
573                         return -ENOMEM;
574         }
575         return 0;
576 }
577
/* Allocate, per RX ring: a vmalloc'ed software shadow ring, the
 * DMA-coherent descriptor rings, and — when rx_pg_ring_size is
 * non-zero — the page shadow ring plus page descriptor rings.
 *
 * Returns 0 or -ENOMEM.  A partial allocation is NOT unwound here;
 * the caller (bnx2_alloc_mem) releases it via bnx2_free_mem() on the
 * error path.
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                rxr->rx_buf_ring =
                        vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;

                memset(rxr->rx_buf_ring, 0,
                       SW_RXBD_RING_SIZE * bp->rx_max_ring);

                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                     &rxr->rx_desc_mapping[j]);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;

                }

                /* Page rings are only used for split-page receive;
                 * rx_pg_ring_size == 0 skips the shadow ring, and
                 * rx_max_pg_ring is presumably 0 in that case so the
                 * loop below does nothing — confirm where it is set. */
                if (bp->rx_pg_ring_size) {
                        rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;

                        memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                               bp->rx_max_pg_ring);
                }

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                &rxr->rx_pg_desc_mapping[j]);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;

                }
        }
        return 0;
}
626
/* Release everything bnx2_alloc_mem() set up: TX/RX rings, the 5709
 * context pages, and the combined status + statistics block.  Also
 * used as the unwind path for a partially-failed bnx2_alloc_mem().
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* The status block and statistics block share one DMA
         * allocation (see bnx2_alloc_mem), so freeing the status block
         * releases both; stats_blk is just an offset into it. */
        if (bnapi->status_blk.msi) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
652
/* Allocate the device's DMA memory: the combined status + statistics
 * block, the 5709 context pages, and the RX/TX rings.
 *
 * Returns 0 on success or -ENOMEM; on any failure the partial
 * allocation is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* With MSI-X, reserve one aligned status slot per
                 * hardware vector instead of a single block. */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block ... */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        /* ... and each further MSI-X vector gets its own aligned slot
         * within the same allocation.  int_num carries the vector
         * number in bits 31:24 of the INT_ACK_CMD writes. */
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics live directly after the status block(s). */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 needs 8KB of host context memory, in page-sized
                 * DMA chunks. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
729
730 static void
731 bnx2_report_fw_link(struct bnx2 *bp)
732 {
733         u32 fw_link_status = 0;
734
735         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
736                 return;
737
738         if (bp->link_up) {
739                 u32 bmsr;
740
741                 switch (bp->line_speed) {
742                 case SPEED_10:
743                         if (bp->duplex == DUPLEX_HALF)
744                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
745                         else
746                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
747                         break;
748                 case SPEED_100:
749                         if (bp->duplex == DUPLEX_HALF)
750                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
751                         else
752                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
753                         break;
754                 case SPEED_1000:
755                         if (bp->duplex == DUPLEX_HALF)
756                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
757                         else
758                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
759                         break;
760                 case SPEED_2500:
761                         if (bp->duplex == DUPLEX_HALF)
762                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
763                         else
764                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
765                         break;
766                 }
767
768                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
769
770                 if (bp->autoneg) {
771                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
772
773                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
774                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
775
776                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
777                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
778                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
779                         else
780                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
781                 }
782         }
783         else
784                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
785
786         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
787 }
788
789 static char *
790 bnx2_xceiver_str(struct bnx2 *bp)
791 {
792         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
793                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
794                  "Copper"));
795 }
796
797 static void
798 bnx2_report_link(struct bnx2 *bp)
799 {
800         if (bp->link_up) {
801                 netif_carrier_on(bp->dev);
802                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
803                        bnx2_xceiver_str(bp));
804
805                 printk("%d Mbps ", bp->line_speed);
806
807                 if (bp->duplex == DUPLEX_FULL)
808                         printk("full duplex");
809                 else
810                         printk("half duplex");
811
812                 if (bp->flow_ctrl) {
813                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
814                                 printk(", receive ");
815                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
816                                         printk("& transmit ");
817                         }
818                         else {
819                                 printk(", transmit ");
820                         }
821                         printk("flow control ON");
822                 }
823                 printk("\n");
824         }
825         else {
826                 netif_carrier_off(bp->dev);
827                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
828                        bnx2_xceiver_str(bp));
829         }
830
831         bnx2_report_fw_link(bp);
832 }
833
/* Resolve the pause (flow control) configuration into bp->flow_ctrl.
 * If speed or flow-control autoneg is disabled, the administratively
 * requested setting is applied; otherwise the result is derived from
 * the local and remote pause advertisements per IEEE 802.3 Annex 28B.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Autoneg not fully enabled: honor the requested setting,
		 * but only on a full duplex link.
		 */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause state
	 * directly in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate the 1000BASE-X pause bits into the copper bit layout
	 * so a single resolution table below serves both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* Partner sends pause but cannot honor it:
				 * we only receive.
				 */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
909
910 static int
911 bnx2_5709s_linkup(struct bnx2 *bp)
912 {
913         u32 val, speed;
914
915         bp->link_up = 1;
916
917         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
918         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
919         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
920
921         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
922                 bp->line_speed = bp->req_line_speed;
923                 bp->duplex = bp->req_duplex;
924                 return 0;
925         }
926         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
927         switch (speed) {
928                 case MII_BNX2_GP_TOP_AN_SPEED_10:
929                         bp->line_speed = SPEED_10;
930                         break;
931                 case MII_BNX2_GP_TOP_AN_SPEED_100:
932                         bp->line_speed = SPEED_100;
933                         break;
934                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
935                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
936                         bp->line_speed = SPEED_1000;
937                         break;
938                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
939                         bp->line_speed = SPEED_2500;
940                         break;
941         }
942         if (val & MII_BNX2_GP_TOP_AN_FD)
943                 bp->duplex = DUPLEX_FULL;
944         else
945                 bp->duplex = DUPLEX_HALF;
946         return 0;
947 }
948
949 static int
950 bnx2_5708s_linkup(struct bnx2 *bp)
951 {
952         u32 val;
953
954         bp->link_up = 1;
955         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
956         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
957                 case BCM5708S_1000X_STAT1_SPEED_10:
958                         bp->line_speed = SPEED_10;
959                         break;
960                 case BCM5708S_1000X_STAT1_SPEED_100:
961                         bp->line_speed = SPEED_100;
962                         break;
963                 case BCM5708S_1000X_STAT1_SPEED_1G:
964                         bp->line_speed = SPEED_1000;
965                         break;
966                 case BCM5708S_1000X_STAT1_SPEED_2G5:
967                         bp->line_speed = SPEED_2500;
968                         break;
969         }
970         if (val & BCM5708S_1000X_STAT1_FD)
971                 bp->duplex = DUPLEX_FULL;
972         else
973                 bp->duplex = DUPLEX_HALF;
974
975         return 0;
976 }
977
978 static int
979 bnx2_5706s_linkup(struct bnx2 *bp)
980 {
981         u32 bmcr, local_adv, remote_adv, common;
982
983         bp->link_up = 1;
984         bp->line_speed = SPEED_1000;
985
986         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
987         if (bmcr & BMCR_FULLDPLX) {
988                 bp->duplex = DUPLEX_FULL;
989         }
990         else {
991                 bp->duplex = DUPLEX_HALF;
992         }
993
994         if (!(bmcr & BMCR_ANENABLE)) {
995                 return 0;
996         }
997
998         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
999         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1000
1001         common = local_adv & remote_adv;
1002         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1003
1004                 if (common & ADVERTISE_1000XFULL) {
1005                         bp->duplex = DUPLEX_FULL;
1006                 }
1007                 else {
1008                         bp->duplex = DUPLEX_HALF;
1009                 }
1010         }
1011
1012         return 0;
1013 }
1014
1015 static int
1016 bnx2_copper_linkup(struct bnx2 *bp)
1017 {
1018         u32 bmcr;
1019
1020         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1021         if (bmcr & BMCR_ANENABLE) {
1022                 u32 local_adv, remote_adv, common;
1023
1024                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1025                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1026
1027                 common = local_adv & (remote_adv >> 2);
1028                 if (common & ADVERTISE_1000FULL) {
1029                         bp->line_speed = SPEED_1000;
1030                         bp->duplex = DUPLEX_FULL;
1031                 }
1032                 else if (common & ADVERTISE_1000HALF) {
1033                         bp->line_speed = SPEED_1000;
1034                         bp->duplex = DUPLEX_HALF;
1035                 }
1036                 else {
1037                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1038                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1039
1040                         common = local_adv & remote_adv;
1041                         if (common & ADVERTISE_100FULL) {
1042                                 bp->line_speed = SPEED_100;
1043                                 bp->duplex = DUPLEX_FULL;
1044                         }
1045                         else if (common & ADVERTISE_100HALF) {
1046                                 bp->line_speed = SPEED_100;
1047                                 bp->duplex = DUPLEX_HALF;
1048                         }
1049                         else if (common & ADVERTISE_10FULL) {
1050                                 bp->line_speed = SPEED_10;
1051                                 bp->duplex = DUPLEX_FULL;
1052                         }
1053                         else if (common & ADVERTISE_10HALF) {
1054                                 bp->line_speed = SPEED_10;
1055                                 bp->duplex = DUPLEX_HALF;
1056                         }
1057                         else {
1058                                 bp->line_speed = 0;
1059                                 bp->link_up = 0;
1060                         }
1061                 }
1062         }
1063         else {
1064                 if (bmcr & BMCR_SPEED100) {
1065                         bp->line_speed = SPEED_100;
1066                 }
1067                 else {
1068                         bp->line_speed = SPEED_10;
1069                 }
1070                 if (bmcr & BMCR_FULLDPLX) {
1071                         bp->duplex = DUPLEX_FULL;
1072                 }
1073                 else {
1074                         bp->duplex = DUPLEX_HALF;
1075                 }
1076         }
1077
1078         return 0;
1079 }
1080
/* Program the context-type word of an L2 rx context.  On the 5709 this
 * also encodes the rx flow-control watermarks, derived from the rx
 * ring size and whether tx pause was negotiated.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Only arm the low watermark when we are allowed to send
		 * pause frames; otherwise use the "disabled" value.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* Watermarks must make sense relative to the ring size. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is clamped to its 4-bit field; a value of 0
		 * disables it, so disable lo_water as well in that case.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1116
1117 static void
1118 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1119 {
1120         int i;
1121         u32 cid;
1122
1123         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1124                 if (i == 1)
1125                         cid = RX_RSS_CID;
1126                 bnx2_init_rx_context(bp, cid);
1127         }
1128 }
1129
/* Program the EMAC to match the current link state: inter-packet gap,
 * port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.  Also
 * acknowledges the EMAC link-change interrupt and, on the 5709,
 * refreshes the rx contexts (their watermarks depend on flow control).
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default tx lengths; 1000 Mbps half duplex needs larger values. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);

	return 0;
}
1199
1200 static void
1201 bnx2_enable_bmsr1(struct bnx2 *bp)
1202 {
1203         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1204             (CHIP_NUM(bp) == CHIP_NUM_5709))
1205                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1206                                MII_BNX2_BLK_ADDR_GP_STATUS);
1207 }
1208
1209 static void
1210 bnx2_disable_bmsr1(struct bnx2 *bp)
1211 {
1212         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1213             (CHIP_NUM(bp) == CHIP_NUM_5709))
1214                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1215                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1216 }
1217
1218 static int
1219 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1220 {
1221         u32 up1;
1222         int ret = 1;
1223
1224         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1225                 return 0;
1226
1227         if (bp->autoneg & AUTONEG_SPEED)
1228                 bp->advertising |= ADVERTISED_2500baseX_Full;
1229
1230         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1231                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1232
1233         bnx2_read_phy(bp, bp->mii_up1, &up1);
1234         if (!(up1 & BCM5708S_UP1_2G5)) {
1235                 up1 |= BCM5708S_UP1_2G5;
1236                 bnx2_write_phy(bp, bp->mii_up1, up1);
1237                 ret = 0;
1238         }
1239
1240         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1241                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1242                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1243
1244         return ret;
1245 }
1246
1247 static int
1248 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1249 {
1250         u32 up1;
1251         int ret = 0;
1252
1253         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1254                 return 0;
1255
1256         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1257                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1258
1259         bnx2_read_phy(bp, bp->mii_up1, &up1);
1260         if (up1 & BCM5708S_UP1_2G5) {
1261                 up1 &= ~BCM5708S_UP1_2G5;
1262                 bnx2_write_phy(bp, bp->mii_up1, up1);
1263                 ret = 1;
1264         }
1265
1266         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1267                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1268                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1269
1270         return ret;
1271 }
1272
1273 static void
1274 bnx2_enable_forced_2g5(struct bnx2 *bp)
1275 {
1276         u32 bmcr;
1277
1278         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1279                 return;
1280
1281         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1282                 u32 val;
1283
1284                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1285                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1286                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1287                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1288                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1289                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1290
1291                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1292                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1293                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1294
1295         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1296                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1297                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1298         }
1299
1300         if (bp->autoneg & AUTONEG_SPEED) {
1301                 bmcr &= ~BMCR_ANENABLE;
1302                 if (bp->req_duplex == DUPLEX_FULL)
1303                         bmcr |= BMCR_FULLDPLX;
1304         }
1305         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1306 }
1307
1308 static void
1309 bnx2_disable_forced_2g5(struct bnx2 *bp)
1310 {
1311         u32 bmcr;
1312
1313         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1314                 return;
1315
1316         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1317                 u32 val;
1318
1319                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1320                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1321                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1322                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1323                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1324
1325                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1326                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1327                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1328
1329         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1330                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1331                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1332         }
1333
1334         if (bp->autoneg & AUTONEG_SPEED)
1335                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1336         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1337 }
1338
1339 static void
1340 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1341 {
1342         u32 val;
1343
1344         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1345         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1346         if (start)
1347                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1348         else
1349                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1350 }
1351
/* Re-evaluate the PHY link state, update bp->link_up/line_speed/duplex
 * via the chip-specific linkup helpers, report any change, and
 * reprogram the MAC accordingly.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* With a firmware-managed (remote) PHY there is nothing to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: link status is latched, so the second read
	 * reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: qualify the BMSR link bit with the EMAC link
	 * status and the autoneg debug NOSYNC indication.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Fill in speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down: drop any forced 2.5G mode and resume
		 * autoneg if parallel detection had disabled it.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1435
1436 static int
1437 bnx2_reset_phy(struct bnx2 *bp)
1438 {
1439         int i;
1440         u32 reg;
1441
1442         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1443
1444 #define PHY_RESET_MAX_WAIT 100
1445         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1446                 udelay(10);
1447
1448                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1449                 if (!(reg & BMCR_RESET)) {
1450                         udelay(20);
1451                         break;
1452                 }
1453         }
1454         if (i == PHY_RESET_MAX_WAIT) {
1455                 return -EBUSY;
1456         }
1457         return 0;
1458 }
1459
1460 static u32
1461 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1462 {
1463         u32 adv = 0;
1464
1465         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1466                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1467
1468                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1469                         adv = ADVERTISE_1000XPAUSE;
1470                 }
1471                 else {
1472                         adv = ADVERTISE_PAUSE_CAP;
1473                 }
1474         }
1475         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1476                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1477                         adv = ADVERTISE_1000XPSE_ASYM;
1478                 }
1479                 else {
1480                         adv = ADVERTISE_PAUSE_ASYM;
1481                 }
1482         }
1483         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1484                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1485                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1486                 }
1487                 else {
1488                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1489                 }
1490         }
1491         return adv;
1492 }
1493
1494 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1495
/* Hand link configuration to the firmware-managed (remote) PHY: encode
 * the requested speed/duplex/pause settings into a netlink-style word,
 * write it to shared memory, and issue the SET_LINK firmware command.
 * Caller holds bp->phy_lock; it is dropped around the firmware sync.
 * Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep, so drop the phy_lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1552
/* Configure the SerDes PHY according to bp->autoneg and the requested
 * speed/duplex.  Delegates to the firmware for remote PHYs.  In forced
 * mode the BMCR and advertisement are programmed directly (bouncing
 * the link if the partner needs to see it drop); in autoneg mode the
 * advertisement is updated and autoneg restarted.  Caller holds
 * bp->phy_lock; it is dropped briefly around the msleep below.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggle 2.5G capability to match the requested speed;
		 * if it changed, the link must be bounced.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* No change needed; re-resolve and program MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1667
/* All fibre speeds the device can advertise; 2.5G is included only when
 * the PHY is 2.5G capable.  NOTE: expands using "bp" from the caller's
 * scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds advertised by default (10/100 half+full, 1000 full). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 speeds plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register advertisement bits. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1682
1683 static void
1684 bnx2_set_default_remote_link(struct bnx2 *bp)
1685 {
1686         u32 link;
1687
1688         if (bp->phy_port == PORT_TP)
1689                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1690         else
1691                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1692
1693         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1694                 bp->req_line_speed = 0;
1695                 bp->autoneg |= AUTONEG_SPEED;
1696                 bp->advertising = ADVERTISED_Autoneg;
1697                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1698                         bp->advertising |= ADVERTISED_10baseT_Half;
1699                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1700                         bp->advertising |= ADVERTISED_10baseT_Full;
1701                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1702                         bp->advertising |= ADVERTISED_100baseT_Half;
1703                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1704                         bp->advertising |= ADVERTISED_100baseT_Full;
1705                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1706                         bp->advertising |= ADVERTISED_1000baseT_Full;
1707                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1708                         bp->advertising |= ADVERTISED_2500baseX_Full;
1709         } else {
1710                 bp->autoneg = 0;
1711                 bp->advertising = 0;
1712                 bp->req_duplex = DUPLEX_FULL;
1713                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1714                         bp->req_line_speed = SPEED_10;
1715                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1716                                 bp->req_duplex = DUPLEX_HALF;
1717                 }
1718                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1719                         bp->req_line_speed = SPEED_100;
1720                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1721                                 bp->req_duplex = DUPLEX_HALF;
1722                 }
1723                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1724                         bp->req_line_speed = SPEED_1000;
1725                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1726                         bp->req_line_speed = SPEED_2500;
1727         }
1728 }
1729
1730 static void
1731 bnx2_set_default_link(struct bnx2 *bp)
1732 {
1733         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1734                 bnx2_set_default_remote_link(bp);
1735                 return;
1736         }
1737
1738         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1739         bp->req_line_speed = 0;
1740         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1741                 u32 reg;
1742
1743                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1744
1745                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1746                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1747                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1748                         bp->autoneg = 0;
1749                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1750                         bp->req_duplex = DUPLEX_FULL;
1751                 }
1752         } else
1753                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1754 }
1755
1756 static void
1757 bnx2_send_heart_beat(struct bnx2 *bp)
1758 {
1759         u32 msg;
1760         u32 addr;
1761
1762         spin_lock(&bp->indirect_lock);
1763         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1764         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1765         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1766         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1767         spin_unlock(&bp->indirect_lock);
1768 }
1769
/* Handle a link event from the firmware-managed (remote) PHY: decode
 * link state, speed, duplex and flow control from the shared-memory
 * link status word, detect a port-media change, and program the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, for change report */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware sets this bit when it expects a driver pulse. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The half-duplex cases intentionally fall through to
		 * the same-speed case below to set line_speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the requested setting unless both
		 * speed and flow-control autoneg are enabled, in which
		 * case take the negotiated result from the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Re-derive defaults if the media type changed. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1846
1847 static int
1848 bnx2_set_remote_link(struct bnx2 *bp)
1849 {
1850         u32 evt_code;
1851
1852         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1853         switch (evt_code) {
1854                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1855                         bnx2_remote_phy_event(bp);
1856                         break;
1857                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1858                 default:
1859                         bnx2_send_heart_beat(bp);
1860                         break;
1861         }
1862         return 0;
1863 }
1864
/* Configure a copper PHY according to bp->autoneg and bp->req_*.
 * Called with bp->phy_lock held; the lock is dropped briefly around
 * msleep() while forcing the link down.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement, plus the pause bits. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000BASE-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is not currently enabled in BMCR.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current link state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1961
1962 static int
1963 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1964 {
1965         if (bp->loopback == MAC_LOOPBACK)
1966                 return 0;
1967
1968         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1969                 return (bnx2_setup_serdes_phy(bp, port));
1970         }
1971         else {
1972                 return (bnx2_setup_copper_phy(bp));
1973         }
1974 }
1975
/* Initialize the 5709 SerDes PHY.  Points the cached MII register
 * offsets at the 5709's shadowed IEEE registers (+0x10), then programs
 * the SerDes digital, over-1G, next-page and clause-73 blocks in order.
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* 5709 SerDes IEEE registers are offset by 0x10. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and map in the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode; disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Clause 73 BAM control settings. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address on the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2025
/* Initialize the 5708 SerDes PHY: select fiber mode, enable PLL
 * detect, advertise 2.5G when capable, and apply board-specific TX
 * amplitude tuning for early chip revisions and backplane designs.
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE register semantics in the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM may supply a board-specific TX control value; apply it
	 * only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2083
/* Initialize the 5706 SerDes PHY.  The 0x18/0x1c accesses are vendor
 * shadow registers; the magic values come from Broadcom and their
 * exact semantics are not visible here.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2121
/* Initialize a copper PHY: apply the CRC-fix and early-DAC workarounds
 * when flagged, configure extended packet length for jumbo MTU, and
 * enable ethernet@wirespeed.  The raw 0x10/0x17/0x18 accesses are
 * vendor shadow/expansion registers; magic values come from Broadcom.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Workaround sequence for boards flagged with the CRC fix. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (clear bit 8 of DSP expand register 8). */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2173
2174
2175 static int
2176 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2177 {
2178         u32 val;
2179         int rc = 0;
2180
2181         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2182         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2183
2184         bp->mii_bmcr = MII_BMCR;
2185         bp->mii_bmsr = MII_BMSR;
2186         bp->mii_bmsr1 = MII_BMSR;
2187         bp->mii_adv = MII_ADVERTISE;
2188         bp->mii_lpa = MII_LPA;
2189
2190         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2191
2192         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2193                 goto setup_phy;
2194
2195         bnx2_read_phy(bp, MII_PHYSID1, &val);
2196         bp->phy_id = val << 16;
2197         bnx2_read_phy(bp, MII_PHYSID2, &val);
2198         bp->phy_id |= val & 0xffff;
2199
2200         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2201                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2202                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2203                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2204                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2205                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2206                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2207         }
2208         else {
2209                 rc = bnx2_init_copper_phy(bp, reset_phy);
2210         }
2211
2212 setup_phy:
2213         if (!rc)
2214                 rc = bnx2_setup_phy(bp, bp->phy_port);
2215
2216         return rc;
2217 }
2218
2219 static int
2220 bnx2_set_mac_loopback(struct bnx2 *bp)
2221 {
2222         u32 mac_mode;
2223
2224         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2225         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2226         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2227         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2228         bp->link_up = 1;
2229         return 0;
2230 }
2231
2232 static int bnx2_test_link(struct bnx2 *);
2233
2234 static int
2235 bnx2_set_phy_loopback(struct bnx2 *bp)
2236 {
2237         u32 mac_mode;
2238         int rc, i;
2239
2240         spin_lock_bh(&bp->phy_lock);
2241         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2242                             BMCR_SPEED1000);
2243         spin_unlock_bh(&bp->phy_lock);
2244         if (rc)
2245                 return rc;
2246
2247         for (i = 0; i < 10; i++) {
2248                 if (bnx2_test_link(bp) == 0)
2249                         break;
2250                 msleep(100);
2251         }
2252
2253         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2254         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2255                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2256                       BNX2_EMAC_MODE_25G_MODE);
2257
2258         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2259         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2260         bp->link_up = 1;
2261         return 0;
2262 }
2263
/* Post @msg_data (ORed with the next driver sequence number) to the
 * firmware mailbox.  If @ack, poll up to FW_ACK_TIME_OUT_MS for the
 * firmware to echo the sequence number back.
 * Returns 0 on success (or for WAIT0 messages regardless of ack),
 * -EBUSY on ack timeout (after telling the firmware it timed out),
 * or -EIO if the firmware reports a non-OK status.
 * @silent suppresses the timeout printk.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not error-checked further. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2309
/* Initialize the 5709 context memory: start the context memory init,
 * wait for it to complete, then program the host page table with the
 * DMA address of each pre-allocated context block.
 * Returns 0 on success, -EBUSY if the hardware does not complete an
 * operation in time, or -ENOMEM if a context page was not allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and kick off memory init.
	 * Bit 12's meaning is not named here — value from Broadcom.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encode host page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for memory-init completion (up to 10 x 2us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address of page i into the host
		 * page table, then wait for the write to be accepted.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2357
/* Zero out the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips).  On 5706 A0 some VCIDs map to remapped physical
 * CIDs — presumably a silicon workaround; the remap formula comes
 * from Broadcom.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* Remap VCIDs with bit 3 set to a different
			 * physical CID range (5706 A0 quirk).
			 */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2400
2401 static int
2402 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2403 {
2404         u16 *good_mbuf;
2405         u32 good_mbuf_cnt;
2406         u32 val;
2407
2408         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2409         if (good_mbuf == NULL) {
2410                 printk(KERN_ERR PFX "Failed to allocate memory in "
2411                                     "bnx2_alloc_bad_rbuf\n");
2412                 return -ENOMEM;
2413         }
2414
2415         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2416                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2417
2418         good_mbuf_cnt = 0;
2419
2420         /* Allocate a bunch of mbufs and save the good ones in an array. */
2421         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2422         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2423                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2424                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2425
2426                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2427
2428                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2429
2430                 /* The addresses with Bit 9 set are bad memory blocks. */
2431                 if (!(val & (1 << 9))) {
2432                         good_mbuf[good_mbuf_cnt] = (u16) val;
2433                         good_mbuf_cnt++;
2434                 }
2435
2436                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2437         }
2438
2439         /* Free the good ones back to the mbuf pool thus discarding
2440          * all the bad ones. */
2441         while (good_mbuf_cnt) {
2442                 good_mbuf_cnt--;
2443
2444                 val = good_mbuf[good_mbuf_cnt];
2445                 val = (val << 9) | val | 1;
2446
2447                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2448         }
2449         kfree(good_mbuf);
2450         return 0;
2451 }
2452
2453 static void
2454 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2455 {
2456         u32 val;
2457
2458         val = (mac_addr[0] << 8) | mac_addr[1];
2459
2460         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2461
2462         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2463                 (mac_addr[4] << 8) | mac_addr[5];
2464
2465         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2466 }
2467
2468 static inline int
2469 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2470 {
2471         dma_addr_t mapping;
2472         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2473         struct rx_bd *rxbd =
2474                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2475         struct page *page = alloc_page(GFP_ATOMIC);
2476
2477         if (!page)
2478                 return -ENOMEM;
2479         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2480                                PCI_DMA_FROMDEVICE);
2481         rx_pg->page = page;
2482         pci_unmap_addr_set(rx_pg, mapping, mapping);
2483         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2484         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2485         return 0;
2486 }
2487
2488 static void
2489 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2490 {
2491         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2492         struct page *page = rx_pg->page;
2493
2494         if (!page)
2495                 return;
2496
2497         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2498                        PCI_DMA_FROMDEVICE);
2499
2500         __free_page(page);
2501         rx_pg->page = NULL;
2502 }
2503
2504 static inline int
2505 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2506 {
2507         struct sk_buff *skb;
2508         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2509         dma_addr_t mapping;
2510         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2511         unsigned long align;
2512
2513         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2514         if (skb == NULL) {
2515                 return -ENOMEM;
2516         }
2517
2518         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2519                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2520
2521         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2522                 PCI_DMA_FROMDEVICE);
2523
2524         rx_buf->skb = skb;
2525         pci_unmap_addr_set(rx_buf, mapping, mapping);
2526
2527         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2528         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2529
2530         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2531
2532         return 0;
2533 }
2534
2535 static int
2536 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2537 {
2538         struct status_block *sblk = bnapi->status_blk.msi;
2539         u32 new_link_state, old_link_state;
2540         int is_set = 1;
2541
2542         new_link_state = sblk->status_attn_bits & event;
2543         old_link_state = sblk->status_attn_bits_ack & event;
2544         if (new_link_state != old_link_state) {
2545                 if (new_link_state)
2546                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2547                 else
2548                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2549         } else
2550                 is_set = 0;
2551
2552         return is_set;
2553 }
2554
/* Service pending PHY attention events under bp->phy_lock:
 * a link-state change triggers link renegotiation/reporting, and a
 * timer-abort event triggers a remote-PHY link update.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        spin_lock(&bp->phy_lock);

        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
                bnx2_set_link(bp);
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);

        spin_unlock(&bp->phy_lock);

}
2568
2569 static inline u16
2570 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2571 {
2572         u16 cons;
2573
2574         /* Tell compiler that status block fields can change. */
2575         barrier();
2576         cons = *bnapi->hw_tx_cons_ptr;
2577         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2578                 cons++;
2579         return cons;
2580 }
2581
/* Reclaim completed tx descriptors for this NAPI instance's tx ring.
 * Unmaps and frees each fully-transmitted skb, updates the software
 * consumer indices, and re-wakes the tx queue if it was stopped and
 * enough descriptors have been freed.  Returns the number of packets
 * reclaimed (at most @budget; budget 0 means unlimited reclaim since
 * the == check below never fires).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
        struct netdev_queue *txq;

        /* Each bnx2_napi maps 1:1 to a netdev tx queue. */
        index = (bnapi - bp->bnx2_napi);
        txq = netdev_get_tx_queue(bp->dev, index);

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        /* Index one past the packet's last descriptor
                         * (head BD + one BD per fragment). */
                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        /* Account for the skipped next-page slot when the
                         * packet wraps the ring page. */
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Signed 16-bit compare handles index wrap: if the
                         * packet's last BD is still ahead of hw_cons, the
                         * packet is only partially complete — stop here. */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                /* Unmap the linear part first, then each page fragment. */
                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Refresh hw_cons: more completions may have arrived. */
                hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;

        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Re-check under the tx lock to avoid racing with start_xmit
         * stopping the queue at the same time. */
        if (unlikely(netif_tx_queue_stopped(txq)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if ((netif_tx_queue_stopped(txq)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        return tx_pkt;
}
2670
/* Recycle @count page buffers from the page ring's consumer side back
 * onto the producer side without allocating new pages.  If @skb is
 * non-NULL, its last page fragment is stripped off, re-mapped for DMA
 * and placed back into the consumer slot first, and the skb is freed
 * (used when a split-frame receive must be abandoned).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        dma_addr_t mapping;
        int i;
        u16 hw_prod = rxr->rx_pg_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                /* First iteration with an skb: reclaim its final page
                 * fragment into the consumer slot, then drop the skb. */
                if (i == 0 && skb) {
                        struct page *page;
                        struct skb_shared_info *shinfo;

                        shinfo = skb_shinfo(skb);
                        shinfo->nr_frags--;
                        page = shinfo->frags[shinfo->nr_frags].page;
                        shinfo->frags[shinfo->nr_frags].page = NULL;
                        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                        cons_rx_pg->page = page;
                        pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
                        dev_kfree_skb(skb);
                }
                /* Move the page, its DMA mapping and its BD address from
                 * the consumer slot to the producer slot (no-op when they
                 * are the same slot). */
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2720
/* Recycle the rx skb at ring slot @cons into producer slot @prod so no
 * new allocation is needed (used on receive errors and after copy-break).
 * Re-syncs the header area back to the device, carries the DMA mapping
 * and BD address over to the producer slot, and advances rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Give the header area (synced for the CPU in bnx2_rx_int)
         * back to the device. */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        /* Same slot: mapping and BD address are already in place. */
        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2750
/* Finish receiving a packet into @skb.  For a normal frame the linear
 * data is simply exposed with skb_put(); for a split (jumbo) frame the
 * first @hdr_len bytes are linear and the rest is assembled from page
 * ring buffers as skb fragments.  @ring_idx packs the consumer slot in
 * the high 16 bits and the producer slot in the low 16 bits.  On any
 * failure the buffers are recycled and an error is returned, leaving
 * the rings consistent.  The "+ 4" below accounts for the 4-byte frame
 * CRC that the hardware leaves at the end of the frame (bnx2_rx_int
 * subtracts it from @len before calling here).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        /* Replenish the slot we are consuming; if that fails, recycle
         * the current skb (and any page buffers) and bail out. */
        err = bnx2_alloc_rx_skb(bp, rxr, prod);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Non-split frame: all data is linear. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        /* The last page holds only (part of) the CRC:
                         * nothing useful remains.  Trim the CRC bytes
                         * already accounted into the skb and recycle the
                         * remaining pages. */
                        if (unlikely(frag_len <= 4)) {
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        /* Drop the CRC from the final fragment. */
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        /* Replenish the page slot before moving on; on
                         * failure give everything (including this skb)
                         * back to the ring. */
                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
2843
2844 static inline u16
2845 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2846 {
2847         u16 cons;
2848
2849         /* Tell compiler that status block fields can change. */
2850         barrier();
2851         cons = *bnapi->hw_rx_cons_ptr;
2852         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2853                 cons++;
2854         return cons;
2855 }
2856
/* Process up to @budget received packets on this NAPI instance's rx
 * ring.  Each buffer begins with an l2_fhdr written by the chip,
 * followed by the frame at BNX2_RX_OFFSET.  Error frames are recycled;
 * small frames are copy-broken into a fresh skb; large/split frames go
 * through bnx2_rx_skb().  Returns the number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Only the l2_fhdr plus the copy-break region need to be
                 * visible to the CPU at this point. */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len;

                /* Chip-flagged receive errors: recycle the buffer. */
                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                          sw_ring_prod);
                        goto next_rx;
                }
                /* Determine the linear header length for split/jumbo
                 * frames; the remainder lives in the page ring. */
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* Strip the 4-byte frame CRC from the reported length. */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;

                        /* Copy-break: small frame, copy into a fresh skb
                         * and recycle the big rx buffer. */
                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                                         BNX2_RX_OFFSET - 2,
                                      new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, rxr, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversize frames unless they are VLAN-tagged
                 * (0x8100 is the 802.1Q ethertype, ETH_P_8021Q). */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Trust the hardware checksum only when rx_csum is on,
                 * the frame is TCP/UDP, and no checksum error is flagged. */
                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        rxr->rx_cons = sw_cons;
        rxr->rx_prod = sw_prod;

        /* Tell the chip about the new producer indices. */
        if (pg_ring_used)
                REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

        REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
3004
3005 /* MSI ISR - The only difference between this and the INTx ISR
3006  * is that the MSI interrupt is always serviced.
3007  */
3008 static irqreturn_t
3009 bnx2_msi(int irq, void *dev_instance)
3010 {
3011         struct bnx2_napi *bnapi = dev_instance;
3012         struct bnx2 *bp = bnapi->bp;
3013         struct net_device *dev = bp->dev;
3014
3015         prefetch(bnapi->status_blk.msi);
3016         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3017                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3018                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3019
3020         /* Return here if interrupt is disabled. */
3021         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3022                 return IRQ_HANDLED;
3023
3024         netif_rx_schedule(dev, &bnapi->napi);
3025
3026         return IRQ_HANDLED;
3027 }
3028
3029 static irqreturn_t
3030 bnx2_msi_1shot(int irq, void *dev_instance)
3031 {
3032         struct bnx2_napi *bnapi = dev_instance;
3033         struct bnx2 *bp = bnapi->bp;
3034         struct net_device *dev = bp->dev;
3035
3036         prefetch(bnapi->status_blk.msi);
3037
3038         /* Return here if interrupt is disabled. */
3039         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3040                 return IRQ_HANDLED;
3041
3042         netif_rx_schedule(dev, &bnapi->napi);
3043
3044         return IRQ_HANDLED;
3045 }
3046
/* INTx interrupt handler.  May be invoked for a shared interrupt that
 * is not ours, so first check whether the status block index advanced
 * or the INTA line is actually asserted before claiming it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct net_device *dev = bp->dev;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Mask chip interrupts until NAPI polling completes. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* Record the status index we are servicing before scheduling,
         * so bnx2_poll() can tell whether new events arrived since. */
        if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bnapi->napi);
        }

        return IRQ_HANDLED;
}
3086
3087 static inline int
3088 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3089 {
3090         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3091         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3092
3093         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3094             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3095                 return 1;
3096         return 0;
3097 }
3098
3099 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3100                                  STATUS_ATTN_BITS_TIMER_ABORT)
3101
3102 static inline int
3103 bnx2_has_work(struct bnx2_napi *bnapi)
3104 {
3105         struct status_block *sblk = bnapi->status_blk.msi;
3106
3107         if (bnx2_has_fast_work(bnapi))
3108                 return 1;
3109
3110         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3111             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3112                 return 1;
3113
3114         return 0;
3115 }
3116
3117 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3118 {
3119         struct status_block *sblk = bnapi->status_blk.msi;
3120         u32 status_attn_bits = sblk->status_attn_bits;
3121         u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3122
3123         if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3124             (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3125
3126                 bnx2_phy_int(bp, bnapi);
3127
3128                 /* This is needed to take care of transient status
3129                  * during link changes.
3130                  */
3131                 REG_WR(bp, BNX2_HC_COMMAND,
3132                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3133                 REG_RD(bp, BNX2_HC_COMMAND);
3134         }
3135 }
3136
3137 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3138                           int work_done, int budget)
3139 {
3140         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3141         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3142
3143         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3144                 bnx2_tx_int(bp, bnapi, 0);
3145
3146         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3147                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3148
3149         return work_done;
3150 }
3151
/* NAPI poll handler for MSI-X vectors.  Loops processing rx/tx work
 * until the budget is exhausted or no fast work remains, then
 * completes NAPI and re-enables this vector's interrupt by writing the
 * last seen status index.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        netif_rx_complete(bp->dev, napi);
                        /* Ack the status index and unmask this vector. */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3178
/* Main NAPI poll handler (INTx / single MSI).  Handles link attention
 * plus rx/tx work each iteration until the budget is exhausted or no
 * work remains, then completes NAPI and re-enables interrupts.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

                if (unlikely(work_done >= budget))
                        break;

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        netif_rx_complete(bp->dev, napi);
                        /* MSI/MSI-X: a single ack write re-enables. */
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: first update the index with interrupts
                         * still masked, then write again without the mask
                         * bit to re-enable them. */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3222
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the chip's receive filtering (promiscuous, multicast hash,
 * unicast perfect-match slots and VLAN tag stripping) to match the
 * current net_device flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct dev_addr_list *uc_ptr;
        int i;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the cached mode with the promiscuous and
         * keep-VLAN-tag bits cleared; both are recomputed below. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags in the frame only when no VLAN group is
         * registered and ASF management firmware is not enabled. */
        if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: fill the hash with all ones. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                /* Hash each multicast address (low byte of the CRC-32)
                 * into one bit of the 8x32-bit filter. */
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Secondary unicast addresses: use the perfect-match slots if
         * they all fit, otherwise fall back to promiscuous mode. */
        uc_ptr = NULL;
        if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                uc_ptr = dev->uc_list;

                /* Add all entries into to the match filter list */
                for (i = 0; i < dev->uc_count; i++) {
                        bnx2_set_mac_addr(bp, uc_ptr->da_addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        uc_ptr = uc_ptr->next;
                }

        }

        /* Only touch the EMAC mode register when it actually changed. */
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable the sorter, load the new mode, then re-enable it. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3317
3318 static void
3319 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3320         u32 rv2p_proc)
3321 {
3322         int i;
3323         u32 val;
3324
3325         if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3326                 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3327                 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3328                 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3329                 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3330         }
3331
3332         for (i = 0; i < rv2p_code_len; i += 8) {
3333                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3334                 rv2p_code++;
3335                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3336                 rv2p_code++;
3337
3338                 if (rv2p_proc == RV2P_PROC1) {
3339                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3340                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3341                 }
3342                 else {
3343                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3344                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3345                 }
3346         }
3347
3348         /* Reset the processor, un-stall is done later. */
3349         if (rv2p_proc == RV2P_PROC1) {
3350                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3351         }
3352         else {
3353                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3354         }
3355 }
3356
3357 static int
3358 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3359 {
3360         u32 offset;
3361         u32 val;
3362         int rc;
3363
3364         /* Halt the CPU. */
3365         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3366         val |= cpu_reg->mode_value_halt;
3367         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3368         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3369
3370         /* Load the Text area. */
3371         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3372         if (fw->gz_text) {
3373                 int j;
3374
3375                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3376                                        fw->gz_text_len);
3377                 if (rc < 0)
3378                         return rc;
3379
3380                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3381                         bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3382                 }
3383         }
3384
3385         /* Load the Data area. */
3386         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3387         if (fw->data) {
3388                 int j;
3389
3390                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3391                         bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3392                 }
3393         }
3394
3395         /* Load the SBSS area. */
3396         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3397         if (fw->sbss_len) {
3398                 int j;
3399
3400                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3401                         bnx2_reg_wr_ind(bp, offset, 0);
3402                 }
3403         }
3404
3405         /* Load the BSS area. */
3406         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3407         if (fw->bss_len) {
3408                 int j;
3409
3410                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3411                         bnx2_reg_wr_ind(bp, offset, 0);
3412                 }
3413         }
3414
3415         /* Load the Read-Only area. */
3416         offset = cpu_reg->spad_base +
3417                 (fw->rodata_addr - cpu_reg->mips_view_base);
3418         if (fw->rodata) {
3419                 int j;
3420
3421                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3422                         bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3423                 }
3424         }
3425
3426         /* Clear the pre-fetch instruction. */
3427         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3428         bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3429
3430         /* Start the CPU. */
3431         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3432         val &= ~cpu_reg->mode_value_halt;
3433         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3434         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3435
3436         return 0;
3437 }
3438
3439 static int
3440 bnx2_init_cpus(struct bnx2 *bp)
3441 {
3442         struct fw_info *fw;
3443         int rc, rv2p_len;
3444         void *text, *rv2p;
3445
3446         /* Initialize the RV2P processor. */
3447         text = vmalloc(FW_BUF_SIZE);
3448         if (!text)
3449                 return -ENOMEM;
3450         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3451                 rv2p = bnx2_xi_rv2p_proc1;
3452                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3453         } else {
3454                 rv2p = bnx2_rv2p_proc1;
3455                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3456         }
3457         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3458         if (rc < 0)
3459                 goto init_cpu_err;
3460
3461         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3462
3463         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3464                 rv2p = bnx2_xi_rv2p_proc2;
3465                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3466         } else {
3467                 rv2p = bnx2_rv2p_proc2;
3468                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3469         }
3470         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3471         if (rc < 0)
3472                 goto init_cpu_err;
3473
3474         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3475
3476         /* Initialize the RX Processor. */
3477         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3478                 fw = &bnx2_rxp_fw_09;
3479         else
3480                 fw = &bnx2_rxp_fw_06;
3481
3482         fw->text = text;
3483         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3484         if (rc)
3485                 goto init_cpu_err;
3486
3487         /* Initialize the TX Processor. */
3488         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3489                 fw = &bnx2_txp_fw_09;
3490         else
3491                 fw = &bnx2_txp_fw_06;
3492
3493         fw->text = text;
3494         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3495         if (rc)
3496                 goto init_cpu_err;
3497
3498         /* Initialize the TX Patch-up Processor. */
3499         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3500                 fw = &bnx2_tpat_fw_09;
3501         else
3502                 fw = &bnx2_tpat_fw_06;
3503
3504         fw->text = text;
3505         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3506         if (rc)
3507                 goto init_cpu_err;
3508
3509         /* Initialize the Completion Processor. */
3510         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3511                 fw = &bnx2_com_fw_09;
3512         else
3513                 fw = &bnx2_com_fw_06;
3514
3515         fw->text = text;
3516         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3517         if (rc)
3518                 goto init_cpu_err;
3519
3520         /* Initialize the Command Processor. */
3521         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3522                 fw = &bnx2_cp_fw_09;
3523         else
3524                 fw = &bnx2_cp_fw_06;
3525
3526         fw->text = text;
3527         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3528
3529 init_cpu_err:
3530         vfree(text);
3531         return rc;
3532 }
3533
/* Move the device between PCI power states D0 (full power) and D3hot
 * (suspend).  Entering D0 clears the PM state field and any pending
 * PME status, then disables the magic/ACPI packet wake logic.  Entering
 * D3hot optionally arms Wake-on-LAN (forces 10/100 autoneg on copper,
 * enables magic-packet receive and all-multicast, and keeps the EMAC/RPM
 * blocks running), notifies firmware, and finally writes the new power
 * state to PMCSR.  Returns 0 on success or -EINVAL for any other state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (-> D0) and write PME_STATUS,
		 * which is write-one-to-clear per the PCI PM spec, to ack
		 * any pending wake event.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI wake packets and disable
		 * further magic-packet detection while in D0.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper for
			 * low-power WOL link; original settings restored
			 * below after bnx2_setup_phy().
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			/* Program MAC address into match-filter slot 0 so
			 * magic packets addressed to us are recognized.
			 */
			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule: accept broadcast and multicast while
			 * suspended (disable, program, then enable).
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell firmware we are suspending (and whether WOL is on). */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 3 == D3hot in the PMCSR power-state field.  5706 A0/A1 are
		 * only put into D3hot when WOL is armed — presumably an early
		 * silicon limitation; confirm against the errata list.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3671
3672 static int
3673 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3674 {
3675         u32 val;
3676         int j;
3677
3678         /* Request access to the flash interface. */
3679         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3680         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3681                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3682                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3683                         break;
3684
3685                 udelay(5);
3686         }
3687
3688         if (j >= NVRAM_TIMEOUT_COUNT)
3689                 return -EBUSY;
3690
3691         return 0;
3692 }
3693
3694 static int
3695 bnx2_release_nvram_lock(struct bnx2 *bp)
3696 {
3697         int j;
3698         u32 val;
3699
3700         /* Relinquish nvram interface. */
3701         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3702
3703         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3704                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3705                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3706                         break;
3707
3708                 udelay(5);
3709         }
3710
3711         if (j >= NVRAM_TIMEOUT_COUNT)
3712                 return -EBUSY;
3713
3714         return 0;
3715 }
3716
3717
3718 static int
3719 bnx2_enable_nvram_write(struct bnx2 *bp)
3720 {
3721         u32 val;
3722
3723         val = REG_RD(bp, BNX2_MISC_CFG);
3724         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3725
3726         if (bp->flash_info->flags & BNX2_NV_WREN) {
3727                 int j;
3728
3729                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3730                 REG_WR(bp, BNX2_NVM_COMMAND,
3731                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3732
3733                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3734                         udelay(5);
3735
3736                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3737                         if (val & BNX2_NVM_COMMAND_DONE)
3738                                 break;
3739                 }
3740
3741                 if (j >= NVRAM_TIMEOUT_COUNT)
3742                         return -EBUSY;
3743         }
3744         return 0;
3745 }
3746
3747 static void
3748 bnx2_disable_nvram_write(struct bnx2 *bp)
3749 {
3750         u32 val;
3751
3752         val = REG_RD(bp, BNX2_MISC_CFG);
3753         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3754 }
3755
3756
3757 static void
3758 bnx2_enable_nvram_access(struct bnx2 *bp)
3759 {
3760         u32 val;
3761
3762         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3763         /* Enable both bits, even on read. */
3764         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3765                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3766 }
3767
3768 static void
3769 bnx2_disable_nvram_access(struct bnx2 *bp)
3770 {
3771         u32 val;
3772
3773         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3774         /* Disable both bits, even after read. */
3775         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3776                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3777                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3778 }
3779
3780 static int
3781 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3782 {
3783         u32 cmd;
3784         int j;
3785
3786         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3787                 /* Buffered flash, no erase needed */
3788                 return 0;
3789
3790         /* Build an erase command */
3791         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3792               BNX2_NVM_COMMAND_DOIT;
3793
3794         /* Need to clear DONE bit separately. */
3795         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3796
3797         /* Address of the NVRAM to read from. */
3798         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3799
3800         /* Issue an erase command. */
3801         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3802
3803         /* Wait for completion. */
3804         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3805                 u32 val;
3806
3807                 udelay(5);
3808
3809                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3810                 if (val & BNX2_NVM_COMMAND_DONE)
3811                         break;
3812         }
3813
3814         if (j >= NVRAM_TIMEOUT_COUNT)
3815                 return -EBUSY;
3816
3817         return 0;
3818 }
3819
3820 static int
3821 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3822 {
3823         u32 cmd;
3824         int j;
3825
3826         /* Build the command word. */
3827         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3828
3829         /* Calculate an offset of a buffered flash, not needed for 5709. */
3830         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3831                 offset = ((offset / bp->flash_info->page_size) <<
3832                            bp->flash_info->page_bits) +
3833                           (offset % bp->flash_info->page_size);
3834         }
3835
3836         /* Need to clear DONE bit separately. */
3837         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3838
3839         /* Address of the NVRAM to read from. */
3840         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3841
3842         /* Issue a read command. */
3843         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3844
3845         /* Wait for completion. */
3846         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3847                 u32 val;
3848
3849                 udelay(5);
3850
3851                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3852                 if (val & BNX2_NVM_COMMAND_DONE) {
3853                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3854                         memcpy(ret_val, &v, 4);
3855                         break;
3856                 }
3857         }
3858         if (j >= NVRAM_TIMEOUT_COUNT)
3859                 return -EBUSY;
3860
3861         return 0;
3862 }
3863
3864
3865 static int
3866 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3867 {
3868         u32 cmd;
3869         __be32 val32;
3870         int j;
3871
3872         /* Build the command word. */
3873         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3874
3875         /* Calculate an offset of a buffered flash, not needed for 5709. */
3876         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3877                 offset = ((offset / bp->flash_info->page_size) <<
3878                           bp->flash_info->page_bits) +
3879                          (offset % bp->flash_info->page_size);
3880         }
3881
3882         /* Need to clear DONE bit separately. */
3883         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3884
3885         memcpy(&val32, val, 4);
3886
3887         /* Write the data. */
3888         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3889
3890         /* Address of the NVRAM to write to. */
3891         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3892
3893         /* Issue the write command. */
3894         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3895
3896         /* Wait for completion. */
3897         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3898                 udelay(5);
3899
3900                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3901                         break;
3902         }
3903         if (j >= NVRAM_TIMEOUT_COUNT)
3904                 return -EBUSY;
3905
3906         return 0;
3907 }
3908
/* Detect the attached flash/EEPROM and record it in bp->flash_info,
 * then determine the usable flash size.
 *
 * 5709 has a single known flash type.  Older chips are matched against
 * flash_table by strap bits read from NVM_CFG1; if the interface has
 * not yet been reconfigured, the matching entry's config is programmed
 * into the NVM_CFG registers under the NVRAM lock.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* NOTE(review): bit 30 of NVM_CFG1 presumably indicates the
	 * interface was already reconfigured (e.g. by the boot code) —
	 * consistent with the branch comments below; confirm against
	 * the chip register reference.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to match against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a table entry matching the straps. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared firmware config; fall back
	 * to the table's total size when the field is zero.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3991
3992 static int
3993 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3994                 int buf_size)
3995 {
3996         int rc = 0;
3997         u32 cmd_flags, offset32, len32, extra;
3998
3999         if (buf_size == 0)
4000                 return 0;
4001
4002         /* Request access to the flash interface. */
4003         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4004                 return rc;
4005
4006         /* Enable access to flash interface */
4007         bnx2_enable_nvram_access(bp);
4008
4009         len32 = buf_size;
4010         offset32 = offset;
4011         extra = 0;
4012
4013         cmd_flags = 0;
4014
4015         if (offset32 & 3) {
4016                 u8 buf[4];
4017                 u32 pre_len;
4018
4019                 offset32 &= ~3;
4020                 pre_len = 4 - (offset & 3);
4021
4022                 if (pre_len >= len32) {
4023                         pre_len = len32;
4024                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4025                                     BNX2_NVM_COMMAND_LAST;
4026                 }
4027                 else {
4028                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4029                 }
4030
4031                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4032
4033                 if (rc)
4034                         return rc;
4035
4036                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4037
4038                 offset32 += 4;
4039                 ret_buf += pre_len;
4040                 len32 -= pre_len;
4041         }
4042         if (len32 & 3) {
4043                 extra = 4 - (len32 & 3);
4044                 len32 = (len32 + 4) & ~3;
4045         }
4046
4047         if (len32 == 4) {
4048                 u8 buf[4];
4049
4050                 if (cmd_flags)
4051                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4052                 else
4053                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4054                                     BNX2_NVM_COMMAND_LAST;
4055
4056                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4057
4058                 memcpy(ret_buf, buf, 4 - extra);
4059         }
4060         else if (len32 > 0) {
4061                 u8 buf[4];
4062
4063                 /* Read the first word. */
4064                 if (cmd_flags)
4065                         cmd_flags = 0;
4066                 else
4067                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4068
4069                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4070
4071                 /* Advance to the next dword. */
4072                 offset32 += 4;
4073                 ret_buf += 4;
4074                 len32 -= 4;
4075
4076                 while (len32 > 4 && rc == 0) {
4077                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4078
4079                         /* Advance to the next dword. */
4080                         offset32 += 4;
4081                         ret_buf += 4;
4082                         len32 -= 4;
4083                 }
4084
4085                 if (rc)
4086                         return rc;
4087
4088                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4089                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4090
4091                 memcpy(ret_buf, buf, 4 - extra);
4092         }
4093
4094         /* Disable access to flash interface */
4095         bnx2_disable_nvram_access(bp);
4096
4097         bnx2_release_nvram_lock(bp);
4098
4099         return rc;
4100 }
4101
4102 static int
4103 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4104                 int buf_size)
4105 {
4106         u32 written, offset32, len32;
4107         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4108         int rc = 0;
4109         int align_start, align_end;
4110
4111         buf = data_buf;
4112         offset32 = offset;
4113         len32 = buf_size;
4114         align_start = align_end = 0;
4115
4116         if ((align_start = (offset32 & 3))) {
4117                 offset32 &= ~3;
4118                 len32 += align_start;
4119                 if (len32 < 4)
4120                         len32 = 4;
4121                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4122                         return rc;
4123         }
4124
4125         if (len32 & 3) {
4126                 align_end = 4 - (len32 & 3);
4127                 len32 += align_end;
4128                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4129                         return rc;
4130         }
4131
4132         if (align_start || align_end) {
4133                 align_buf = kmalloc(len32, GFP_KERNEL);
4134                 if (align_buf == NULL)
4135                         return -ENOMEM;
4136                 if (align_start) {
4137                         memcpy(align_buf, start, 4);
4138                 }
4139                 if (align_end) {
4140                         memcpy(align_buf + len32 - 4, end, 4);
4141                 }
4142                 memcpy(align_buf + align_start, data_buf, buf_size);
4143                 buf = align_buf;
4144         }
4145
4146         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4147                 flash_buffer = kmalloc(264, GFP_KERNEL);
4148                 if (flash_buffer == NULL) {
4149                         rc = -ENOMEM;
4150                         goto nvram_write_end;
4151                 }
4152         }
4153
4154         written = 0;
4155         while ((written < len32) && (rc == 0)) {
4156                 u32 page_start, page_end, data_start, data_end;
4157                 u32 addr, cmd_flags;
4158                 int i;
4159
4160                 /* Find the page_start addr */
4161                 page_start = offset32 + written;
4162                 page_start -= (page_start % bp->flash_info->page_size);
4163                 /* Find the page_end addr */
4164                 page_end = page_start + bp->flash_info->page_size;
4165                 /* Find the data_start addr */
4166                 data_start = (written == 0) ? offset32 : page_start;
4167                 /* Find the data_end addr */
4168                 data_end = (page_end > offset32 + len32) ?
4169                         (offset32 + len32) : page_end;
4170
4171                 /* Request access to the flash interface. */
4172                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4173                         goto nvram_write_end;
4174
4175                 /* Enable access to flash interface */
4176                 bnx2_enable_nvram_access(bp);
4177
4178                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4179                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4180                         int j;
4181
4182                         /* Read the whole page into the buffer
4183                          * (non-buffer flash only) */
4184                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4185                                 if (j == (bp->flash_info->page_size - 4)) {
4186                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4187                                 }
4188                                 rc = bnx2_nvram_read_dword(bp,
4189                                         page_start + j,
4190                                         &flash_buffer[j],
4191                                         cmd_flags);
4192
4193                                 if (rc)
4194                                         goto nvram_write_end;
4195
4196                                 cmd_flags = 0;
4197                         }
4198                 }
4199
4200                 /* Enable writes to flash interface (unlock write-protect) */
4201                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4202                         goto nvram_write_end;
4203
4204                 /* Loop to write back the buffer data from page_start to
4205                  * data_start */
4206                 i = 0;
4207                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4208                         /* Erase the page */
4209                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4210                                 goto nvram_write_end;
4211
4212                         /* Re-enable the write again for the actual write */
4213                         bnx2_enable_nvram_write(bp);
4214
4215                         for (addr = page_start; addr < data_start;
4216                                 addr += 4, i += 4) {
4217
4218                                 rc = bnx2_nvram_write_dword(bp, addr,
4219                                         &flash_buffer[i], cmd_flags);
4220
4221                                 if (rc != 0)
4222                                         goto nvram_write_end;
4223
4224                                 cmd_flags = 0;
4225                         }
4226                 }
4227
4228                 /* Loop to write the new data from data_start to data_end */
4229                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4230                         if ((addr == page_end - 4) ||
4231                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4232                                  (addr == data_end - 4))) {
4233
4234                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4235                         }
4236                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4237                                 cmd_flags);
4238
4239                         if (rc != 0)
4240                                 goto nvram_write_end;
4241
4242                         cmd_flags = 0;
4243                         buf += 4;
4244                 }
4245
4246                 /* Loop to write back the buffer data from data_end
4247                  * to page_end */
4248                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4249                         for (addr = data_end; addr < page_end;
4250                                 addr += 4, i += 4) {
4251
4252                                 if (addr == page_end-4) {
4253                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4254                                 }
4255                                 rc = bnx2_nvram_write_dword(bp, addr,
4256                                         &flash_buffer[i], cmd_flags);
4257
4258                                 if (rc != 0)
4259                                         goto nvram_write_end;
4260
4261                                 cmd_flags = 0;
4262                         }
4263                 }
4264
4265                 /* Disable writes to flash interface (lock write-protect) */
4266                 bnx2_disable_nvram_write(bp);
4267
4268                 /* Disable access to flash interface */
4269                 bnx2_disable_nvram_access(bp);
4270                 bnx2_release_nvram_lock(bp);
4271
4272                 /* Increment written */
4273                 written += data_end - data_start;
4274         }
4275
4276 nvram_write_end:
4277         kfree(flash_buffer);
4278         kfree(align_buf);
4279         return rc;
4280 }
4281
4282 static void
4283 bnx2_init_fw_cap(struct bnx2 *bp)
4284 {
4285         u32 val, sig = 0;
4286
4287         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4288         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4289
4290         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4291                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4292
4293         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4294         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4295                 return;
4296
4297         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4298                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4299                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4300         }
4301
4302         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4303             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4304                 u32 link;
4305
4306                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4307
4308                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4309                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4310                         bp->phy_port = PORT_FIBRE;
4311                 else
4312                         bp->phy_port = PORT_TP;
4313
4314                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4315                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4316         }
4317
4318         if (netif_running(bp->dev) && sig)
4319                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4320 }
4321
/* Point the spare PCI GRC windows at the chip's MSI-X vector table and
 * pending-bit array so they are reachable through the register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Use separate-window mode so windows 2 and 3 can be remapped. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4330
/* Soft-reset the chip, handshaking with the bootcode before and after.
 * @reset_code: BNX2_DRV_MSG_CODE_* reason passed to the firmware.
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * if the chip comes back in the wrong endian mode, or the error from
 * the firmware sync / rbuf fixup.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through MISC_COMMAND; the config-space
		 * write below only restores window/swap settings. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the bootcode may have switched
	 * the PHY port (remote PHY), in which case re-set the default
	 * remote link parameters. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset wiped the GRC window setup for MSI-X; redo it. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4436
/* One-time hardware bring-up after reset: DMA configuration, context
 * memory, on-chip CPU firmware, MAC/MTU setup, and host coalescing.
 * Returns 0 on success or a negative errno from context/CPU init or
 * the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented tuning bits, presumably DMA burst /
	 * arbitration settings inherited from the vendor driver. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable relaxed ordering on PCI-X. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the tx backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Host addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing trip points and timers; the *_int values in
	 * the upper 16 bits apply while interrupts are disabled. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status block configs for extra MSI-X vectors. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the bootcode that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4647
4648 static void
4649 bnx2_clear_ring_states(struct bnx2 *bp)
4650 {
4651         struct bnx2_napi *bnapi;
4652         struct bnx2_tx_ring_info *txr;
4653         struct bnx2_rx_ring_info *rxr;
4654         int i;
4655
4656         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4657                 bnapi = &bp->bnx2_napi[i];
4658                 txr = &bnapi->tx_ring;
4659                 rxr = &bnapi->rx_ring;
4660
4661                 txr->tx_cons = 0;
4662                 txr->hw_tx_cons = 0;
4663                 rxr->rx_prod_bseq = 0;
4664                 rxr->rx_prod = 0;
4665                 rxr->rx_cons = 0;
4666                 rxr->rx_pg_prod = 0;
4667                 rxr->rx_pg_cons = 0;
4668         }
4669 }
4670
4671 static void
4672 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4673 {
4674         u32 val, offset0, offset1, offset2, offset3;
4675         u32 cid_addr = GET_CID_ADDR(cid);
4676
4677         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4678                 offset0 = BNX2_L2CTX_TYPE_XI;
4679                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4680                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4681                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4682         } else {
4683                 offset0 = BNX2_L2CTX_TYPE;
4684                 offset1 = BNX2_L2CTX_CMD_TYPE;
4685                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4686                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4687         }
4688         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4689         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4690
4691         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4692         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4693
4694         val = (u64) txr->tx_desc_mapping >> 32;
4695         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4696
4697         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4698         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4699 }
4700
4701 static void
4702 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4703 {
4704         struct tx_bd *txbd;
4705         u32 cid = TX_CID;
4706         struct bnx2_napi *bnapi;
4707         struct bnx2_tx_ring_info *txr;
4708
4709         bnapi = &bp->bnx2_napi[ring_num];
4710         txr = &bnapi->tx_ring;
4711
4712         if (ring_num == 0)
4713                 cid = TX_CID;
4714         else
4715                 cid = TX_TSS_CID + ring_num - 1;
4716
4717         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4718
4719         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4720
4721         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4722         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4723
4724         txr->tx_prod = 0;
4725         txr->tx_prod_bseq = 0;
4726
4727         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4728         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4729
4730         bnx2_init_tx_context(bp, cid, txr);
4731 }
4732
4733 static void
4734 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4735                      int num_rings)
4736 {
4737         int i;
4738         struct rx_bd *rxbd;
4739
4740         for (i = 0; i < num_rings; i++) {
4741                 int j;
4742
4743                 rxbd = &rx_ring[i][0];
4744                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4745                         rxbd->rx_bd_len = buf_size;
4746                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4747                 }
4748                 if (i == (num_rings - 1))
4749                         j = 0;
4750                 else
4751                         j = i + 1;
4752                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4753                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4754         }
4755 }
4756
/* Set up one rx ring: program its bd pages and hardware context,
 * optionally set up the page (jumbo) ring, pre-fill both rings with
 * buffers, and publish the initial producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default rx CID; extra RSS rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page-buffer size 0 disables the page ring until configured. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Host address of the first rx bd page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early (without error) on
	 * allocation failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal rx ring with skbs the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the producer indices to the hardware mailboxes. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4836
/* Initialize all tx and rx rings and, when more than one rx ring is
 * in use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the indirection table four entries (one byte
		 * each) per 32-bit write; entries point at the RSS
		 * rings (0..num_rx_rings-2, excluding the default
		 * ring).  NOTE(review): tbl[] fills the word in host
		 * byte order before the cpu_to_be32 swap -- presumably
		 * to match the RXP firmware's expected byte layout;
		 * confirm before touching. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
4881
4882 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4883 {
4884         u32 max, num_rings = 1;
4885
4886         while (ring_size > MAX_RX_DESC_CNT) {
4887                 ring_size -= MAX_RX_DESC_CNT;
4888                 num_rings++;
4889         }
4890         /* round to next power of 2 */
4891         max = max_size;
4892         while ((max & num_rings) == 0)
4893                 max >>= 1;
4894
4895         if (num_rings != max)
4896                 max <<= 1;
4897
4898         return max;
4899 }
4900
/* Compute rx buffer sizes and ring counts for @size entries at the
 * current MTU.  When a full frame plus overhead would not fit in one
 * page (jumbo MTU) and the chip supports it, split reception between
 * small header buffers and a separate page ring.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint: aligned data + alignment slack +
	 * skb_shared_info tail. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame; the 40 is presumably the
		 * TCP+IP header bytes kept in the header buffer --
		 * TODO confirm. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In page mode the first buffer only holds headers. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4939
4940 static void
4941 bnx2_free_tx_skbs(struct bnx2 *bp)
4942 {
4943         int i;
4944
4945         for (i = 0; i < bp->num_tx_rings; i++) {
4946                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4947                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4948                 int j;
4949
4950                 if (txr->tx_buf_ring == NULL)
4951                         continue;
4952
4953                 for (j = 0; j < TX_DESC_CNT; ) {
4954                         struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4955                         struct sk_buff *skb = tx_buf->skb;
4956                         int k, last;
4957
4958                         if (skb == NULL) {
4959                                 j++;
4960                                 continue;
4961                         }
4962
4963                         pci_unmap_single(bp->pdev,
4964                                          pci_unmap_addr(tx_buf, mapping),
4965                         skb_headlen(skb), PCI_DMA_TODEVICE);
4966
4967                         tx_buf->skb = NULL;
4968
4969                         last = skb_shinfo(skb)->nr_frags;
4970                         for (k = 0; k < last; k++) {
4971                                 tx_buf = &txr->tx_buf_ring[j + k + 1];
4972                                 pci_unmap_page(bp->pdev,
4973                                         pci_unmap_addr(tx_buf, mapping),
4974                                         skb_shinfo(skb)->frags[j].size,
4975                                         PCI_DMA_TODEVICE);
4976                         }
4977                         dev_kfree_skb(skb);
4978                         j += k + 1;
4979                 }
4980         }
4981 }
4982
4983 static void
4984 bnx2_free_rx_skbs(struct bnx2 *bp)
4985 {
4986         int i;
4987
4988         for (i = 0; i < bp->num_rx_rings; i++) {
4989                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4990                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4991                 int j;
4992
4993                 if (rxr->rx_buf_ring == NULL)
4994                         return;
4995
4996                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4997                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4998                         struct sk_buff *skb = rx_buf->skb;
4999
5000                         if (skb == NULL)
5001                                 continue;
5002
5003                         pci_unmap_single(bp->pdev,
5004                                          pci_unmap_addr(rx_buf, mapping),
5005                                          bp->rx_buf_use_size,
5006                                          PCI_DMA_FROMDEVICE);
5007
5008                         rx_buf->skb = NULL;
5009
5010                         dev_kfree_skb(skb);
5011                 }
5012                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5013                         bnx2_free_rx_page(bp, rxr, j);
5014         }
5015 }
5016
/* Release all TX and RX buffers held by the driver's rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5023
5024 static int
5025 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5026 {
5027         int rc;
5028
5029         rc = bnx2_reset_chip(bp, reset_code);
5030         bnx2_free_skbs(bp);
5031         if (rc)
5032                 return rc;
5033
5034         if ((rc = bnx2_init_chip(bp)) != 0)
5035                 return rc;
5036
5037         bnx2_init_all_rings(bp);
5038         return 0;
5039 }
5040
5041 static int
5042 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5043 {
5044         int rc;
5045
5046         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5047                 return rc;
5048
5049         spin_lock_bh(&bp->phy_lock);
5050         bnx2_init_phy(bp, reset_phy);
5051         bnx2_set_link(bp);
5052         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5053                 bnx2_remote_phy_event(bp);
5054         spin_unlock_bh(&bp->phy_lock);
5055         return 0;
5056 }
5057
/* Ethtool register self-test.  For each entry in reg_tbl, verify that
 * the writable bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) keep their value across those writes.  The
 * original register contents are restored in all cases.  Entries
 * flagged BNX2_FL_NOT_5709 are skipped on 5709 chips.
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, writable-bit mask, read-only-bit mask } */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: rw bits must read back 0, ro bits
		 * must be unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back 1, ro bits
		 * must still be unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5228
5229 static int
5230 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5231 {
5232         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5233                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5234         int i;
5235
5236         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5237                 u32 offset;
5238
5239                 for (offset = 0; offset < size; offset += 4) {
5240
5241                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5242
5243                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5244                                 test_pattern[i]) {
5245                                 return -ENODEV;
5246                         }
5247                 }
5248         }
5249         return 0;
5250 }
5251
5252 static int
5253 bnx2_test_memory(struct bnx2 *bp)
5254 {
5255         int ret = 0;
5256         int i;
5257         static struct mem_entry {
5258                 u32   offset;
5259                 u32   len;
5260         } mem_tbl_5706[] = {
5261                 { 0x60000,  0x4000 },
5262                 { 0xa0000,  0x3000 },
5263                 { 0xe0000,  0x4000 },
5264                 { 0x120000, 0x4000 },
5265                 { 0x1a0000, 0x4000 },
5266                 { 0x160000, 0x4000 },
5267                 { 0xffffffff, 0    },
5268         },
5269         mem_tbl_5709[] = {
5270                 { 0x60000,  0x4000 },
5271                 { 0xa0000,  0x3000 },
5272                 { 0xe0000,  0x4000 },
5273                 { 0x120000, 0x4000 },
5274                 { 0x1a0000, 0x4000 },
5275                 { 0xffffffff, 0    },
5276         };
5277         struct mem_entry *mem_tbl;
5278
5279         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5280                 mem_tbl = mem_tbl_5709;
5281         else
5282                 mem_tbl = mem_tbl_5706;
5283
5284         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5285                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5286                         mem_tbl[i].len)) != 0) {
5287                         return ret;
5288                 }
5289         }
5290
5291         return ret;
5292 }
5293
5294 #define BNX2_MAC_LOOPBACK       0
5295 #define BNX2_PHY_LOOPBACK       1
5296
/* Self-test helper: place the MAC or PHY in loopback, transmit one
 * frame on ring 0 and verify it arrives back intact on RX ring 0.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, -ENODEV if the frame is lost, flagged
 * bad by hardware, or its payload is corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	/* NOTE(review): these assignments repeat the initializers above;
	 * redundant but harmless. */
	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback does not apply with a remote PHY. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Largest frame that still fits in a single RX buffer. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC = our own address, next 8 header bytes zeroed,
	 * payload is a recognizable byte ramp verified on receive. */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalesce event (no interrupt) so the status block
	 * indices are current before sampling the RX consumer. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single start+end TX descriptor for the frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second coalesce to refresh the indices after the loopback. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject any frame the hardware flagged as bad. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length (minus 4-byte CRC) must match what we sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte ramp. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5424
5425 #define BNX2_MAC_LOOPBACK_FAILED        1
5426 #define BNX2_PHY_LOOPBACK_FAILED        2
5427 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5428                                          BNX2_PHY_LOOPBACK_FAILED)
5429
5430 static int
5431 bnx2_test_loopback(struct bnx2 *bp)
5432 {
5433         int rc = 0;
5434
5435         if (!netif_running(bp->dev))
5436                 return BNX2_LOOPBACK_FAILED;
5437
5438         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5439         spin_lock_bh(&bp->phy_lock);
5440         bnx2_init_phy(bp, 1);
5441         spin_unlock_bh(&bp->phy_lock);
5442         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5443                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5444         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5445                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5446         return rc;
5447 }
5448
5449 #define NVRAM_SIZE 0x200
5450 #define CRC32_RESIDUAL 0xdebb20e3
5451
5452 static int
5453 bnx2_test_nvram(struct bnx2 *bp)
5454 {
5455         __be32 buf[NVRAM_SIZE / 4];
5456         u8 *data = (u8 *) buf;
5457         int rc = 0;
5458         u32 magic, csum;
5459
5460         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5461                 goto test_nvram_done;
5462
5463         magic = be32_to_cpu(buf[0]);
5464         if (magic != 0x669955aa) {
5465                 rc = -ENODEV;
5466                 goto test_nvram_done;
5467         }
5468
5469         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5470                 goto test_nvram_done;
5471
5472         csum = ether_crc_le(0x100, data);
5473         if (csum != CRC32_RESIDUAL) {
5474                 rc = -ENODEV;
5475                 goto test_nvram_done;
5476         }
5477
5478         csum = ether_crc_le(0x100, data + 0x100);
5479         if (csum != CRC32_RESIDUAL) {
5480                 rc = -ENODEV;
5481         }
5482
5483 test_nvram_done:
5484         return rc;
5485 }
5486
5487 static int
5488 bnx2_test_link(struct bnx2 *bp)
5489 {
5490         u32 bmsr;
5491
5492         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5493                 if (bp->link_up)
5494                         return 0;
5495                 return -ENODEV;
5496         }
5497         spin_lock_bh(&bp->phy_lock);
5498         bnx2_enable_bmsr1(bp);
5499         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5500         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5501         bnx2_disable_bmsr1(bp);
5502         spin_unlock_bh(&bp->phy_lock);
5503
5504         if (bmsr & BMSR_LSTATUS) {
5505                 return 0;
5506         }
5507         return -ENODEV;
5508 }
5509
5510 static int
5511 bnx2_test_intr(struct bnx2 *bp)
5512 {
5513         int i;
5514         u16 status_idx;
5515
5516         if (!netif_running(bp->dev))
5517                 return -ENODEV;
5518
5519         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5520
5521         /* This register is not touched during run-time. */
5522         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5523         REG_RD(bp, BNX2_HC_COMMAND);
5524
5525         for (i = 0; i < 10; i++) {
5526                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5527                         status_idx) {
5528
5529                         break;
5530                 }
5531
5532                 msleep_interruptible(10);
5533         }
5534         if (i < 10)
5535                 return 0;
5536
5537         return -ENODEV;
5538 }
5539
/* Determining link for parallel detection (5706 SerDes, link partner
 * not autonegotiating).  Returns 1 when a usable link signal is
 * present, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register and check signal detect. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* AN debug register is read twice — NOTE(review): presumably the
	 * first read returns latched bits and the second the current
	 * state; confirm against the 5706S PHY documentation. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Expansion register 1, also double-read. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5571
/* Per-tick SerDes state machine for the 5706, run from bnx2_timer():
 * performs parallel detection when autoneg gets no response, returns
 * to autoneg when a parallel-detected partner later starts
 * negotiating, and forces the link down on loss of sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Still waiting out a previously started autoneg. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg produced no link; if a usable signal is
			 * present, force 1G full duplex (parallel detect). */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): registers 0x17/0x15 look like a shadow/
		 * expansion access; bit 0x20 presumably indicates the
		 * partner is now autonegotiating — confirm against the
		 * PHY documentation. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner started autoneg: re-enable it here too
			 * and leave parallel-detect mode. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG, same pattern as
		 * bnx2_5706_serdes_has_link(). */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link is up but the PHY lost sync: force the link
			 * down once, then let bnx2_set_link() re-evaluate
			 * on subsequent ticks. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5633
/* Per-tick SerDes handling for the 5708, run from bnx2_timer():
 * while the link is down, alternates between forced 2.5G mode and
 * autoneg so that non-negotiating partners can still link up.
 * No-op with a remote PHY.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found no link: try forced 2.5G for a
			 * shorter interval. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode found no link either: go back to
			 * autoneg and let it run for two timer ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5666
/* Periodic driver timer: sends the firmware heartbeat, refreshes the
 * firmware RX drop counter, applies a 5708 statistics workaround and
 * runs the per-chip SerDes polling routines.  Re-arms itself with
 * bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep the timer armed) while interrupt
	 * handling is held off (intr_sem raised). */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	/* Mirror the firmware-maintained RX drop count into the stats
	 * block so ethtool/ifconfig see it. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5698
5699 static int
5700 bnx2_request_irq(struct bnx2 *bp)
5701 {
5702         unsigned long flags;
5703         struct bnx2_irq *irq;
5704         int rc = 0, i;
5705
5706         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5707                 flags = 0;
5708         else
5709                 flags = IRQF_SHARED;
5710
5711         for (i = 0; i < bp->irq_nvecs; i++) {
5712                 irq = &bp->irq_tbl[i];
5713                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5714                                  &bp->bnx2_napi[i]);
5715                 if (rc)
5716                         break;
5717                 irq->requested = 1;
5718         }
5719         return rc;
5720 }
5721
5722 static void
5723 bnx2_free_irq(struct bnx2 *bp)
5724 {
5725         struct bnx2_irq *irq;
5726         int i;
5727
5728         for (i = 0; i < bp->irq_nvecs; i++) {
5729                 irq = &bp->irq_tbl[i];
5730                 if (irq->requested)
5731                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5732                 irq->requested = 0;
5733         }
5734         if (bp->flags & BNX2_FLAG_USING_MSI)
5735                 pci_disable_msi(bp->pdev);
5736         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5737                 pci_disable_msix(bp->pdev);
5738
5739         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5740 }
5741
/* Try to switch the device to MSI-X with @msix_vecs vectors.  On
 * success sets BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI and fills
 * irq_tbl with the obtained vectors; on failure returns silently,
 * leaving the caller's INTx defaults in place.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];

	/* Program the MSI-X table and PBA window locations before
	 * enabling any vectors. */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;

		/* All MSI-X vectors use the one-shot handler. */
		strcpy(bp->irq_tbl[i].name, bp->dev->name);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}

	/* All-or-nothing: ask for the full vector count. */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5770
/* Choose the interrupt mode (MSI-X, MSI or INTx), fill irq_tbl
 * accordingly, and derive the TX/RX ring counts from the number of
 * vectors obtained.  @dis_msi forces plain INTx.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Start from INTx defaults; the MSI/MSI-X paths below only
	 * overwrite them on success. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 uses the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* NOTE(review): TX ring count is rounded down to a power of two
	 * — presumably required by the TX queue selection logic;
	 * confirm against the TX path. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
5804
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        /* Choose MSI-X/MSI/INTx and size the rings before allocating
         * the ring memory that depends on those counts.
         */
        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        /* Clear the interrupt gate before enabling chip interrupts. */
        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* Re-run interrupt setup with MSI forced off, then
                         * bring the NIC back up on the legacy interrupt.
                         */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

        netif_tx_start_all_queues(dev);

        return 0;

open_err:
        /* Unwind everything acquired above; the free routines are safe to
         * call even for resources that were never allocated.
         */
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        return rc;
}
5881
/* Work handler behind bp->reset_task: fully re-initialize the NIC.
 * Scheduled from bnx2_tx_timeout().
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        /* The interface may have been brought down since the reset was
         * scheduled; nothing to do in that case.
         */
        if (!netif_running(bp->dev))
                return;

        bnx2_netif_stop(bp);

        bnx2_init_nic(bp, 1);

        /* NOTE(review): intr_sem is set to 1 before restart — presumably
         * bnx2_netif_start() clears it when re-enabling interrupts; confirm
         * against those helpers (not visible in this chunk).
         */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
}
5897
5898 static void
5899 bnx2_tx_timeout(struct net_device *dev)
5900 {
5901         struct bnx2 *bp = netdev_priv(dev);
5902
5903         /* This allows the netif to be shutdown gracefully before resetting */
5904         schedule_work(&bp->reset_task);
5905 }
5906
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Quiesce the device while the VLAN group pointer is swapped. */
        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);
        /* Notify the firmware of the VLAN change when it supports
         * keeping VLAN tags.
         */
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

        bnx2_netif_start(bp);
}
#endif
5924
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* Need one BD for the head plus one per fragment. */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        /* Accumulate per-packet flags (checksum offload, VLAN tag, LSO)
         * into the BD flags word.
         */
        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* For TCPv6 LSO, pack the (non-zero) TCP header
                         * offset, 8-byte scaled, into flag and mss bits.
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* IPv4 LSO: headers are rewritten below, so a
                         * cloned header block must be un-shared first.
                         */
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Prime the IP/TCP headers with per-segment values;
                         * the hardware fixes up each emitted segment.
                         */
                        iph = ip_hdr(skb);
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* NOTE(review): the DMA mapping results below are not checked for
         * mapping errors (no pci_dma_mapping_error() calls).
         */
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        /* First BD covers the linear head and carries the START flag. */
        txbd = &txr->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One BD per page fragment, chained after the head BD. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last BD of the chain. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Ring the doorbell: publish the new producer index and byte count. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop the queue when nearly full; re-wake immediately if a
         * concurrent bnx2_tx_int() freed enough descriptors meanwhile.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
}
6073
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Make sure no reset work runs concurrently with teardown. */
        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        /* Tell the firmware why we are going down, so it can arm (or not
         * arm) wake-on-LAN accordingly.
         */
        if (bp->flags & BNX2_FLAG_NO_WOL)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6101
/* Read a 64-bit hardware statistics counter split into _hi/_lo 32-bit
 * halves.  The whole expansion is parenthesized so the macro composes
 * safely inside larger expressions (bnx2_get_stats() sums several of
 * these counters together).
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32 bits fit in an unsigned long. */
#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6114
/* net_device get_stats hook: translate the chip's statistics block into
 * struct net_device_stats.  Counters are read from the DMA'd stats block
 * (bp->stats_blk); 64-bit counters are folded via GET_NET_STATS.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        /* Stats block not allocated yet: return the (zeroed) cache. */
        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        /* "Overrsize" spelling matches the stats-block field name. */
        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* 5706 and 5708 A0 do not report carrier sense errors. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
6190
6191 /* All ethtool functions called with rtnl_lock */
6192
6193 static int
6194 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6195 {
6196         struct bnx2 *bp = netdev_priv(dev);
6197         int support_serdes = 0, support_copper = 0;
6198
6199         cmd->supported = SUPPORTED_Autoneg;
6200         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6201                 support_serdes = 1;
6202                 support_copper = 1;
6203         } else if (bp->phy_port == PORT_FIBRE)
6204                 support_serdes = 1;
6205         else
6206                 support_copper = 1;
6207
6208         if (support_serdes) {
6209                 cmd->supported |= SUPPORTED_1000baseT_Full |
6210                         SUPPORTED_FIBRE;
6211                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6212                         cmd->supported |= SUPPORTED_2500baseX_Full;
6213
6214         }
6215         if (support_copper) {
6216                 cmd->supported |= SUPPORTED_10baseT_Half |
6217                         SUPPORTED_10baseT_Full |
6218                         SUPPORTED_100baseT_Half |
6219                         SUPPORTED_100baseT_Full |
6220                         SUPPORTED_1000baseT_Full |
6221                         SUPPORTED_TP;
6222
6223         }
6224
6225         spin_lock_bh(&bp->phy_lock);
6226         cmd->port = bp->phy_port;
6227         cmd->advertising = bp->advertising;
6228
6229         if (bp->autoneg & AUTONEG_SPEED) {
6230                 cmd->autoneg = AUTONEG_ENABLE;
6231         }
6232         else {
6233                 cmd->autoneg = AUTONEG_DISABLE;
6234         }
6235
6236         if (netif_carrier_ok(dev)) {
6237                 cmd->speed = bp->line_speed;
6238                 cmd->duplex = bp->duplex;
6239         }
6240         else {
6241                 cmd->speed = -1;
6242                 cmd->duplex = -1;
6243         }
6244         spin_unlock_bh(&bp->phy_lock);
6245
6246         cmd->transceiver = XCVR_INTERNAL;
6247         cmd->phy_address = bp->phy_addr;
6248
6249         return 0;
6250 }
6251
/* ethtool set_settings hook: validate and apply link settings.  Working
 * copies of the autoneg/speed/duplex/advertising fields are built first
 * and committed only after all validation passes, so a rejected request
 * leaves the configuration untouched.  Returns 0 or -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching ports requires remote-PHY capability. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G requires a capable PHY on the fibre port. */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise all speeds for the port. */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre supports only 1G/2.5G full duplex. */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed: commit the validated settings. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6346
6347 static void
6348 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6349 {
6350         struct bnx2 *bp = netdev_priv(dev);
6351
6352         strcpy(info->driver, DRV_MODULE_NAME);
6353         strcpy(info->version, DRV_MODULE_VERSION);
6354         strcpy(info->bus_info, pci_name(bp->pdev));
6355         strcpy(info->fw_version, bp->fw_version);
6356 }
6357
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len hook: buffer size ethtool must allocate. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6365
6366 static void
6367 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6368 {
6369         u32 *p = _p, i, offset;
6370         u8 *orig_p = _p;
6371         struct bnx2 *bp = netdev_priv(dev);
6372         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6373                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6374                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6375                                  0x1040, 0x1048, 0x1080, 0x10a4,
6376                                  0x1400, 0x1490, 0x1498, 0x14f0,
6377                                  0x1500, 0x155c, 0x1580, 0x15dc,
6378                                  0x1600, 0x1658, 0x1680, 0x16d8,
6379                                  0x1800, 0x1820, 0x1840, 0x1854,
6380                                  0x1880, 0x1894, 0x1900, 0x1984,
6381                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6382                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6383                                  0x2000, 0x2030, 0x23c0, 0x2400,
6384                                  0x2800, 0x2820, 0x2830, 0x2850,
6385                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6386                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6387                                  0x4080, 0x4090, 0x43c0, 0x4458,
6388                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6389                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6390                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6391                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6392                                  0x6800, 0x6848, 0x684c, 0x6860,
6393                                  0x6888, 0x6910, 0x8000 };
6394
6395         regs->version = 0;
6396
6397         memset(p, 0, BNX2_REGDUMP_LEN);
6398
6399         if (!netif_running(bp->dev))
6400                 return;
6401
6402         i = 0;
6403         offset = reg_boundaries[0];
6404         p += offset;
6405         while (offset < BNX2_REGDUMP_LEN) {
6406                 *p++ = REG_RD(bp, offset);
6407                 offset += 4;
6408                 if (offset == reg_boundaries[i + 1]) {
6409                         offset = reg_boundaries[i + 2];
6410                         p = (u32 *) (orig_p + offset);
6411                         i += 2;
6412                 }
6413         }
6414 }
6415
6416 static void
6417 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6418 {
6419         struct bnx2 *bp = netdev_priv(dev);
6420
6421         if (bp->flags & BNX2_FLAG_NO_WOL) {
6422                 wol->supported = 0;
6423                 wol->wolopts = 0;
6424         }
6425         else {
6426                 wol->supported = WAKE_MAGIC;
6427                 if (bp->wol)
6428                         wol->wolopts = WAKE_MAGIC;
6429                 else
6430                         wol->wolopts = 0;
6431         }
6432         memset(&wol->sopass, 0, sizeof(wol->sopass));
6433 }
6434
6435 static int
6436 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6437 {
6438         struct bnx2 *bp = netdev_priv(dev);
6439
6440         if (wol->wolopts & ~WAKE_MAGIC)
6441                 return -EINVAL;
6442
6443         if (wol->wolopts & WAKE_MAGIC) {
6444                 if (bp->flags & BNX2_FLAG_NO_WOL)
6445                         return -EINVAL;
6446
6447                 bp->wol = 1;
6448         }
6449         else {
6450                 bp->wol = 0;
6451         }
6452         return 0;
6453 }
6454
/* ethtool nway_reset hook: restart link autonegotiation.  Only valid
 * when autoneg is enabled; returns -EINVAL otherwise.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote PHY: let the management firmware handle the restart. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock for the sleep; reacquired below. */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout handling in the timer. */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6497
6498 static int
6499 bnx2_get_eeprom_len(struct net_device *dev)
6500 {
6501         struct bnx2 *bp = netdev_priv(dev);
6502
6503         if (bp->flash_info == NULL)
6504                 return 0;
6505
6506         return (int) bp->flash_size;
6507 }
6508
6509 static int
6510 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6511                 u8 *eebuf)
6512 {
6513         struct bnx2 *bp = netdev_priv(dev);
6514         int rc;
6515
6516         /* parameters already validated in ethtool_get_eeprom */
6517
6518         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6519
6520         return rc;
6521 }
6522
6523 static int
6524 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6525                 u8 *eebuf)
6526 {
6527         struct bnx2 *bp = netdev_priv(dev);
6528         int rc;
6529
6530         /* parameters already validated in ethtool_set_eeprom */
6531
6532         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6533
6534         return rc;
6535 }
6536
6537 static int
6538 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6539 {
6540         struct bnx2 *bp = netdev_priv(dev);
6541
6542         memset(coal, 0, sizeof(struct ethtool_coalesce));
6543
6544         coal->rx_coalesce_usecs = bp->rx_ticks;
6545         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6546         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6547         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6548
6549         coal->tx_coalesce_usecs = bp->tx_ticks;
6550         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6551         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6552         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6553
6554         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6555
6556         return 0;
6557 }
6558
6559 static int
6560 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6561 {
6562         struct bnx2 *bp = netdev_priv(dev);
6563
6564         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6565         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6566
6567         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6568         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6569
6570         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6571         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6572
6573         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6574         if (bp->rx_quick_cons_trip_int > 0xff)
6575                 bp->rx_quick_cons_trip_int = 0xff;
6576
6577         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6578         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6579
6580         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6581         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6582
6583         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6584         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6585
6586         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6587         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6588                 0xff;
6589
6590         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6591         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6592                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6593                         bp->stats_ticks = USEC_PER_SEC;
6594         }
6595         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6596                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6597         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6598
6599         if (netif_running(bp->dev)) {
6600                 bnx2_netif_stop(bp);
6601                 bnx2_init_nic(bp, 0);
6602                 bnx2_netif_start(bp);
6603         }
6604
6605         return 0;
6606 }
6607
6608 static void
6609 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6610 {
6611         struct bnx2 *bp = netdev_priv(dev);
6612
6613         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6614         ering->rx_mini_max_pending = 0;
6615         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6616
6617         ering->rx_pending = bp->rx_ring_size;
6618         ering->rx_mini_pending = 0;
6619         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6620
6621         ering->tx_max_pending = MAX_TX_DESC_CNT;
6622         ering->tx_pending = bp->tx_ring_size;
6623 }
6624
/* Resize the rx/tx rings.  If the device is running it is torn down,
 * the new sizes are recorded, and it is brought back up.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                /* NOTE(review): on allocation failure the device is left
                 * stopped with its memory freed while still marked running;
                 * confirm the caller/user is expected to close the device.
                 */
                if (rc)
                        return rc;
                bnx2_init_nic(bp, 0);
                bnx2_netif_start(bp);
        }
        return 0;
}
6649
6650 static int
6651 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6652 {
6653         struct bnx2 *bp = netdev_priv(dev);
6654         int rc;
6655
6656         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6657                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6658                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6659
6660                 return -EINVAL;
6661         }
6662         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6663         return rc;
6664 }
6665
6666 static void
6667 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6668 {
6669         struct bnx2 *bp = netdev_priv(dev);
6670
6671         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6672         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6673         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6674 }
6675
6676 static int
6677 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6678 {
6679         struct bnx2 *bp = netdev_priv(dev);
6680
6681         bp->req_flow_ctrl = 0;
6682         if (epause->rx_pause)
6683                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6684         if (epause->tx_pause)
6685                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6686
6687         if (epause->autoneg) {
6688                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6689         }
6690         else {
6691                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6692         }
6693
6694         spin_lock_bh(&bp->phy_lock);
6695
6696         bnx2_setup_phy(bp, bp->phy_port);
6697
6698         spin_unlock_bh(&bp->phy_lock);
6699
6700         return 0;
6701 }
6702
6703 static u32
6704 bnx2_get_rx_csum(struct net_device *dev)
6705 {
6706         struct bnx2 *bp = netdev_priv(dev);
6707
6708         return bp->rx_csum;
6709 }
6710
6711 static int
6712 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6713 {
6714         struct bnx2 *bp = netdev_priv(dev);
6715
6716         bp->rx_csum = data;
6717         return 0;
6718 }
6719
6720 static int
6721 bnx2_set_tso(struct net_device *dev, u32 data)
6722 {
6723         struct bnx2 *bp = netdev_priv(dev);
6724
6725         if (data) {
6726                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6727                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6728                         dev->features |= NETIF_F_TSO6;
6729         } else
6730                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6731                                    NETIF_F_TSO_ECN);
6732         return 0;
6733 }
6734
#define BNX2_NUM_STATS 46

/* ethtool statistic names (ETH_SS_STATS).  The order here must match
 * bnx2_stats_offset_arr[] and the bnx2_570x_stats_len_arr[] tables
 * below, which are indexed in lockstep.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6787
/* Offset of a counter within struct statistics_block, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter; order matches bnx2_stats_str_arr[].
 * For 64-bit counters the offset points at the _hi (upper) word; the
 * lower word is read from the following offset (see
 * bnx2_get_ethtool_stats()).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6838
/* Per-counter widths in bytes for 5706 A0-A2 / 5708 A0 chips:
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = counter skipped.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6849
/* Per-counter widths for all other chip revisions; only the second
 * entry (stat_IfHCInBadOctets) remains skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6857
#define BNX2_NUM_TESTS 6

/* ethtool self-test names (ETH_SS_TEST).  Indices correspond to the
 * buf[] result slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6870
6871 static int
6872 bnx2_get_sset_count(struct net_device *dev, int sset)
6873 {
6874         switch (sset) {
6875         case ETH_SS_TEST:
6876                 return BNX2_NUM_TESTS;
6877         case ETH_SS_STATS:
6878                 return BNX2_NUM_STATS;
6879         default:
6880                 return -EOPNOTSUPP;
6881         }
6882 }
6883
/* ethtool self-test handler.
 *
 * Offline tests (register, memory, loopback) require stopping traffic
 * and resetting the chip into diagnostic mode, so they run only when
 * ETH_TEST_FL_OFFLINE is requested; nvram, interrupt, and link tests
 * always run.  A nonzero buf[i] marks test i (see bnx2_tests_str_arr)
 * as failed, and ETH_TEST_FL_FAILED is set in etest->flags.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and enter diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] reports the raw loopback result; nonzero = failure. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Leave diagnostic mode: full reset if the interface is
		 * down, otherwise re-initialize and restart traffic.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up (poll for up to 7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6939
6940 static void
6941 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6942 {
6943         switch (stringset) {
6944         case ETH_SS_STATS:
6945                 memcpy(buf, bnx2_stats_str_arr,
6946                         sizeof(bnx2_stats_str_arr));
6947                 break;
6948         case ETH_SS_TEST:
6949                 memcpy(buf, bnx2_tests_str_arr,
6950                         sizeof(bnx2_tests_str_arr));
6951                 break;
6952         }
6953 }
6954
6955 static void
6956 bnx2_get_ethtool_stats(struct net_device *dev,
6957                 struct ethtool_stats *stats, u64 *buf)
6958 {
6959         struct bnx2 *bp = netdev_priv(dev);
6960         int i;
6961         u32 *hw_stats = (u32 *) bp->stats_blk;
6962         u8 *stats_len_arr = NULL;
6963
6964         if (hw_stats == NULL) {
6965                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6966                 return;
6967         }
6968
6969         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6970             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6971             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6972             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6973                 stats_len_arr = bnx2_5706_stats_len_arr;
6974         else
6975                 stats_len_arr = bnx2_5708_stats_len_arr;
6976
6977         for (i = 0; i < BNX2_NUM_STATS; i++) {
6978                 if (stats_len_arr[i] == 0) {
6979                         /* skip this counter */
6980                         buf[i] = 0;
6981                         continue;
6982                 }
6983                 if (stats_len_arr[i] == 4) {
6984                         /* 4-byte counter */
6985                         buf[i] = (u64)
6986                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6987                         continue;
6988                 }
6989                 /* 8-byte counter */
6990                 buf[i] = (((u64) *(hw_stats +
6991                                         bnx2_stats_offset_arr[i])) << 32) +
6992                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6993         }
6994 }
6995
6996 static int
6997 bnx2_phys_id(struct net_device *dev, u32 data)
6998 {
6999         struct bnx2 *bp = netdev_priv(dev);
7000         int i;
7001         u32 save;
7002
7003         if (data == 0)
7004                 data = 2;
7005
7006         save = REG_RD(bp, BNX2_MISC_CFG);
7007         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7008
7009         for (i = 0; i < (data * 2); i++) {
7010                 if ((i % 2) == 0) {
7011                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7012                 }
7013                 else {
7014                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7015                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7016                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7017                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7018                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7019                                 BNX2_EMAC_LED_TRAFFIC);
7020                 }
7021                 msleep_interruptible(500);
7022                 if (signal_pending(current))
7023                         break;
7024         }
7025         REG_WR(bp, BNX2_EMAC_LED, 0);
7026         REG_WR(bp, BNX2_MISC_CFG, save);
7027         return 0;
7028 }
7029
7030 static int
7031 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7032 {
7033         struct bnx2 *bp = netdev_priv(dev);
7034
7035         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7036                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7037         else
7038                 return (ethtool_op_set_tx_csum(dev, data));
7039 }
7040
/* ethtool entry points for the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7071
/* Called with rtnl_lock */
/* MII ioctl handler: SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG.
 * PHY register access is refused when the remote-PHY capability flag
 * is set, and requires the interface to be up.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* PHY reads must be serialized against link handling. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7125
7126 /* Called with rtnl_lock */
7127 static int
7128 bnx2_change_mac_addr(struct net_device *dev, void *p)
7129 {
7130         struct sockaddr *addr = p;
7131         struct bnx2 *bp = netdev_priv(dev);
7132
7133         if (!is_valid_ether_addr(addr->sa_data))
7134                 return -EINVAL;
7135
7136         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7137         if (netif_running(dev))
7138                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7139
7140         return 0;
7141 }
7142
7143 /* Called with rtnl_lock */
7144 static int
7145 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7146 {
7147         struct bnx2 *bp = netdev_priv(dev);
7148
7149         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7150                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7151                 return -EINVAL;
7152
7153         dev->mtu = new_mtu;
7154         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7155 }
7156
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: run the interrupt handler synchronously with the
 * device IRQ disabled (used by netconsole and similar facilities).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
7168
7169 static void __devinit
7170 bnx2_get_5709_media(struct bnx2 *bp)
7171 {
7172         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7173         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7174         u32 strap;
7175
7176         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7177                 return;
7178         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7179                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7180                 return;
7181         }
7182
7183         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7184                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7185         else
7186                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7187
7188         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7189                 switch (strap) {
7190                 case 0x4:
7191                 case 0x5:
7192                 case 0x6:
7193                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7194                         return;
7195                 }
7196         } else {
7197                 switch (strap) {
7198                 case 0x1:
7199                 case 0x2:
7200                 case 0x4:
7201                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7202                         return;
7203                 }
7204         }
7205 }
7206
/* Detect the bus type (PCI vs PCI-X), width, and clock speed from the
 * chip's config-space status registers, recording the results in
 * bp->flags and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected clock-speed field to MHz. */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		/* NOTE(review): no default case — an unmatched value leaves
		 * bus_speed_mhz unchanged; confirm this is intentional.
		 */
		}
	}
	else {
		/* Conventional PCI: only the 33 vs 66 MHz distinction. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7258
7259 static int __devinit
7260 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7261 {
7262         struct bnx2 *bp;
7263         unsigned long mem_len;
7264         int rc, i, j;
7265         u32 reg;
7266         u64 dma_mask, persist_dma_mask;
7267
7268         SET_NETDEV_DEV(dev, &pdev->dev);
7269         bp = netdev_priv(dev);
7270
7271         bp->flags = 0;
7272         bp->phy_flags = 0;
7273
7274         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7275         rc = pci_enable_device(pdev);
7276         if (rc) {
7277                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7278                 goto err_out;
7279         }
7280
7281         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7282                 dev_err(&pdev->dev,
7283                         "Cannot find PCI device base address, aborting.\n");
7284                 rc = -ENODEV;
7285                 goto err_out_disable;
7286         }
7287
7288         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7289         if (rc) {
7290                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7291                 goto err_out_disable;
7292         }
7293
7294         pci_set_master(pdev);
7295         pci_save_state(pdev);
7296
7297         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7298         if (bp->pm_cap == 0) {
7299                 dev_err(&pdev->dev,
7300                         "Cannot find power management capability, aborting.\n");
7301                 rc = -EIO;
7302                 goto err_out_release;
7303         }
7304
7305         bp->dev = dev;
7306         bp->pdev = pdev;
7307
7308         spin_lock_init(&bp->phy_lock);
7309         spin_lock_init(&bp->indirect_lock);
7310         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7311
7312         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7313         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7314         dev->mem_end = dev->mem_start + mem_len;
7315         dev->irq = pdev->irq;
7316
7317         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7318
7319         if (!bp->regview) {
7320                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7321                 rc = -ENOMEM;
7322                 goto err_out_release;
7323         }
7324
7325         /* Configure byte swap and enable write to the reg_window registers.
7326          * Rely on CPU to do target byte swapping on big endian systems
7327          * The chip's target access swapping will not swap all accesses
7328          */
7329         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7330                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7331                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7332
7333         bnx2_set_power_state(bp, PCI_D0);
7334
7335         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7336
7337         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7338                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7339                         dev_err(&pdev->dev,
7340                                 "Cannot find PCIE capability, aborting.\n");
7341                         rc = -EIO;
7342                         goto err_out_unmap;
7343                 }
7344                 bp->flags |= BNX2_FLAG_PCIE;
7345                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7346                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7347         } else {
7348                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7349                 if (bp->pcix_cap == 0) {
7350                         dev_err(&pdev->dev,
7351                                 "Cannot find PCIX capability, aborting.\n");
7352                         rc = -EIO;
7353                         goto err_out_unmap;
7354                 }
7355         }
7356
7357         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7358                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7359                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7360         }
7361
7362         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7363                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7364                         bp->flags |= BNX2_FLAG_MSI_CAP;
7365         }
7366
7367         /* 5708 cannot support DMA addresses > 40-bit.  */
7368         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7369                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7370         else
7371                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7372
7373         /* Configure DMA attributes. */
7374         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7375                 dev->features |= NETIF_F_HIGHDMA;
7376                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7377                 if (rc) {
7378                         dev_err(&pdev->dev,
7379                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7380                         goto err_out_unmap;
7381                 }
7382         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7383                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7384                 goto err_out_unmap;
7385         }
7386
7387         if (!(bp->flags & BNX2_FLAG_PCIE))
7388                 bnx2_get_pci_speed(bp);
7389
7390         /* 5706A0 may falsely detect SERR and PERR. */
7391         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7392                 reg = REG_RD(bp, PCI_COMMAND);
7393                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7394                 REG_WR(bp, PCI_COMMAND, reg);
7395         }
7396         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7397                 !(bp->flags & BNX2_FLAG_PCIX)) {
7398
7399                 dev_err(&pdev->dev,
7400                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7401                 goto err_out_unmap;
7402         }
7403
7404         bnx2_init_nvram(bp);
7405
7406         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7407
7408         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7409             BNX2_SHM_HDR_SIGNATURE_SIG) {
7410                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7411
7412                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7413         } else
7414                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7415
7416         /* Get the permanent MAC address.  First we need to make sure the
7417          * firmware is actually running.
7418          */
7419         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7420
7421         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7422             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7423                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7424                 rc = -ENODEV;
7425                 goto err_out_unmap;
7426         }
7427
7428         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7429         for (i = 0, j = 0; i < 3; i++) {
7430                 u8 num, k, skip0;
7431
7432                 num = (u8) (reg >> (24 - (i * 8)));
7433                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7434                         if (num >= k || !skip0 || k == 1) {
7435                                 bp->fw_version[j++] = (num / k) + '0';
7436                                 skip0 = 0;
7437                         }
7438                 }
7439                 if (i != 2)
7440                         bp->fw_version[j++] = '.';
7441         }
7442         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7443         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7444                 bp->wol = 1;
7445
7446         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7447                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7448
7449                 for (i = 0; i < 30; i++) {
7450                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7451                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7452                                 break;
7453                         msleep(10);
7454                 }
7455         }
7456         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7457         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7458         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7459             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7460                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7461
7462                 bp->fw_version[j++] = ' ';
7463                 for (i = 0; i < 3; i++) {
7464                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7465                         reg = swab32(reg);
7466                         memcpy(&bp->fw_version[j], &reg, 4);
7467                         j += 4;
7468                 }
7469         }
7470
7471         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7472         bp->mac_addr[0] = (u8) (reg >> 8);
7473         bp->mac_addr[1] = (u8) reg;
7474
7475         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7476         bp->mac_addr[2] = (u8) (reg >> 24);
7477         bp->mac_addr[3] = (u8) (reg >> 16);
7478         bp->mac_addr[4] = (u8) (reg >> 8);
7479         bp->mac_addr[5] = (u8) reg;
7480
7481         bp->tx_ring_size = MAX_TX_DESC_CNT;
7482         bnx2_set_rx_ring_size(bp, 255);
7483
7484         bp->rx_csum = 1;
7485
7486         bp->tx_quick_cons_trip_int = 20;
7487         bp->tx_quick_cons_trip = 20;
7488         bp->tx_ticks_int = 80;
7489         bp->tx_ticks = 80;
7490
7491         bp->rx_quick_cons_trip_int = 6;
7492         bp->rx_quick_cons_trip = 6;
7493         bp->rx_ticks_int = 18;
7494         bp->rx_ticks = 18;
7495
7496         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7497
7498         bp->timer_interval =  HZ;
7499         bp->current_interval =  HZ;
7500
7501         bp->phy_addr = 1;
7502
7503         /* Disable WOL support if we are running on a SERDES chip. */
7504         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7505                 bnx2_get_5709_media(bp);
7506         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7507                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7508
7509         bp->phy_port = PORT_TP;
7510         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7511                 bp->phy_port = PORT_FIBRE;
7512                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7513                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7514                         bp->flags |= BNX2_FLAG_NO_WOL;
7515                         bp->wol = 0;
7516                 }
7517                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7518                         /* Don't do parallel detect on this board because of
7519                          * some board problems.  The link will not go down
7520                          * if we do parallel detect.
7521                          */
7522                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7523                             pdev->subsystem_device == 0x310c)
7524                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7525                 } else {
7526                         bp->phy_addr = 2;
7527                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7528                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7529                 }
7530         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7531                    CHIP_NUM(bp) == CHIP_NUM_5708)
7532                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7533         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7534                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7535                   CHIP_REV(bp) == CHIP_REV_Bx))
7536                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7537
7538         bnx2_init_fw_cap(bp);
7539
7540         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7541             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7542             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7543                 bp->flags |= BNX2_FLAG_NO_WOL;
7544                 bp->wol = 0;
7545         }
7546
7547         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7548                 bp->tx_quick_cons_trip_int =
7549                         bp->tx_quick_cons_trip;
7550                 bp->tx_ticks_int = bp->tx_ticks;
7551                 bp->rx_quick_cons_trip_int =
7552                         bp->rx_quick_cons_trip;
7553                 bp->rx_ticks_int = bp->rx_ticks;
7554                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7555                 bp->com_ticks_int = bp->com_ticks;
7556                 bp->cmd_ticks_int = bp->cmd_ticks;
7557         }
7558
7559         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7560          *
7561          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7562          * with byte enables disabled on the unused 32-bit word.  This is legal
7563          * but causes problems on the AMD 8132 which will eventually stop
7564          * responding after a while.
7565          *
7566          * AMD believes this incompatibility is unique to the 5706, and
7567          * prefers to locally disable MSI rather than globally disabling it.
7568          */
7569         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7570                 struct pci_dev *amd_8132 = NULL;
7571
7572                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7573                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7574                                                   amd_8132))) {
7575
7576                         if (amd_8132->revision >= 0x10 &&
7577                             amd_8132->revision <= 0x13) {
7578                                 disable_msi = 1;
7579                                 pci_dev_put(amd_8132);
7580                                 break;
7581                         }
7582                 }
7583         }
7584
7585         bnx2_set_default_link(bp);
7586         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7587
7588         init_timer(&bp->timer);
7589         bp->timer.expires = RUN_AT(bp->timer_interval);
7590         bp->timer.data = (unsigned long) bp;
7591         bp->timer.function = bnx2_timer;
7592
7593         return 0;
7594
7595 err_out_unmap:
7596         if (bp->regview) {
7597                 iounmap(bp->regview);
7598                 bp->regview = NULL;
7599         }
7600
7601 err_out_release:
7602         pci_release_regions(pdev);
7603
7604 err_out_disable:
7605         pci_disable_device(pdev);
7606         pci_set_drvdata(pdev, NULL);
7607
7608 err_out:
7609         return rc;
7610 }
7611
7612 static char * __devinit
7613 bnx2_bus_string(struct bnx2 *bp, char *str)
7614 {
7615         char *s = str;
7616
7617         if (bp->flags & BNX2_FLAG_PCIE) {
7618                 s += sprintf(s, "PCI Express");
7619         } else {
7620                 s += sprintf(s, "PCI");
7621                 if (bp->flags & BNX2_FLAG_PCIX)
7622                         s += sprintf(s, "-X");
7623                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7624                         s += sprintf(s, " 32-bit");
7625                 else
7626                         s += sprintf(s, " 64-bit");
7627                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7628         }
7629         return str;
7630 }
7631
7632 static void __devinit
7633 bnx2_init_napi(struct bnx2 *bp)
7634 {
7635         int i;
7636
7637         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7638                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7639                 int (*poll)(struct napi_struct *, int);
7640
7641                 if (i == 0)
7642                         poll = bnx2_poll;
7643                 else
7644                         poll = bnx2_poll_msix;
7645
7646                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7647                 bnapi->bp = bp;
7648         }
7649 }
7650
/* PCI probe entry point: allocate the net_device, initialize the board
 * via bnx2_init_board(), wire up the net_device callbacks and feature
 * flags, and register with the network stack.  Returns 0 on success or
 * a negative errno; on failure every resource acquired here (and by
 * bnx2_init_board()) is released before returning.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;	/* print the driver banner only once */
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];			/* scratch buffer for bnx2_bus_string() */
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	/* Enables the PCI device, maps BARs and reads board configuration
	 * (MAC address, fw version, PHY setup) into the private struct. */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_rx_mode = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* bp->mac_addr was filled in from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;	/* IPv6 csum only on 5709 */

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;		/* TSO over IPv6 only on 5709 */

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Unwind everything bnx2_init_board() set up, then free dev. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',	/* chip revision letter */
		((CHIP_ID(bp) & 0x0ff0) >> 4),		/* chip metal revision */
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7738
/* PCI remove: tear down in roughly the reverse order of bnx2_init_one().
 * The device is guaranteed quiesced once unregister_netdev() returns
 * (it invokes dev->stop if the interface was up).
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Drain any driver work still queued on the shared workqueue
	 * before the device goes away. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7757
/* PM suspend handler: save PCI config space, quiesce the NIC, tell the
 * bootcode why the driver is unloading (WOL vs. no-WOL), and drop the
 * chip to the PCI power state chosen for @state.  Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Choose the firmware reset message based on Wake-on-LAN state. */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7788
/* PM resume handler: restore PCI config space and, if the interface was
 * up at suspend time, bring the chip back to D0 and re-initialize it.
 * Returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* NOTE(review): the return value of bnx2_init_nic() is ignored, so a
	 * failed re-init still reports a successful resume — confirm upstream. */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7805
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It quiesces the interface under
 * rtnl_lock, resets the chip if the interface was running, disables
 * the PCI device, and asks the AER core for a slot reset.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
7835
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Re-enables
 * the device and restores config space; if the interface was running,
 * powers the chip to D0 and re-initializes the NIC.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		/* NOTE(review): bnx2_init_nic()'s return value is ignored;
		 * RECOVERED is reported even if re-init fails — confirm. */
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
7865
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Restarts the interface (if it was
 * running) and re-attaches the device, all under rtnl_lock.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
7885
/* PCI Advanced Error Recovery (AER) callbacks for this driver. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
7891
/* PCI driver descriptor: probe/remove, power management and AER hooks. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
7901
/* Module load: register the PCI driver; probing happens per matching device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7906
/* Module unload: unregister the PCI driver (removes all bound devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7911
/* Module entry/exit points. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);