1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #include "bnx2.h"
13 #include "bnx2_fw.h"
14
15 #define DRV_MODULE_NAME         "bnx2"
16 #define PFX DRV_MODULE_NAME     ": "
17 #define DRV_MODULE_VERSION      "1.2.19"
18 #define DRV_MODULE_RELDATE      "May 23, 2005"
19
20 #define RUN_AT(x) (jiffies + (x))
21
22 /* Time in jiffies before concluding the transmitter is hung. */
23 #define TX_TIMEOUT  (5*HZ)
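/* Added, hedged note (not in the original source): TX_TIMEOUT is the value
 * a driver of this era would normally assign to the net_device watchdog,
 * and RUN_AT() is the usual helper for arming the driver's periodic timer.
 * The actual call sites are further down in the file; the shape is roughly:
 *
 *	dev->watchdog_timeo = TX_TIMEOUT;
 *	mod_timer(&bp->timer, RUN_AT(bp->timer_interval));
 *
 * (bp->timer and bp->timer_interval are assumed from the private struct in
 * bnx2.h; only timer_interval is visible in this excerpt.)
 */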
24
25 static char version[] __devinitdata =
26         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
27
28 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
29 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706 Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_MODULE_VERSION);
32
33 static int disable_msi = 0;
34
35 module_param(disable_msi, int, 0);
36 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
37
38 typedef enum {
39         BCM5706 = 0,
40         NC370T,
41         NC370I,
42         BCM5706S,
43         NC370F,
44 } board_t;
45
46 /* indexed by board_t, above */
47 static struct {
48         char *name;
49 } board_info[] __devinitdata = {
50         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
51         { "HP NC370T Multifunction Gigabit Server Adapter" },
52         { "HP NC370i Multifunction Gigabit Server Adapter" },
53         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
54         { "HP NC370F Multifunction Gigabit Server Adapter" },
55         { NULL },
56 };
57
58 static struct pci_device_id bnx2_pci_tbl[] = {
59         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
60           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
61         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
62           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
63         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
64           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
65         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
66           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
67         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
68           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
69         { 0, }
70 };
71
72 static struct flash_spec flash_table[] =
73 {
74         /* Slow EEPROM */
75         {0x00000000, 0x40030380, 0x009f0081, 0xa184a053, 0xaf000400,
76          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
77          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
78          "EEPROM - slow"},
79         /* Fast EEPROM */
80         {0x02000000, 0x62008380, 0x009f0081, 0xa184a053, 0xaf000400,
81          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
82          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
83          "EEPROM - fast"},
84         /* ATMEL AT45DB011B (buffered flash) */
85         {0x02000003, 0x6e008173, 0x00570081, 0x68848353, 0xaf000400,
86          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
87          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
88          "Buffered flash"},
89         /* Saifun SA25F005 (non-buffered flash) */
90         /* strap, cfg1, & write1 need updates */
91         {0x01000003, 0x5f008081, 0x00050081, 0x03840253, 0xaf020406,
92          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
93          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
94          "Non-buffered flash (64kB)"},
95         /* Saifun SA25F010 (non-buffered flash) */
96         /* strap, cfg1, & write1 need updates */
97         {0x00000001, 0x47008081, 0x00050081, 0x03840253, 0xaf020406,
98          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
99          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
100          "Non-buffered flash (128kB)"},
101         /* Saifun SA25F020 (non-buffered flash) */
102         /* strap, cfg1, & write1 need updates */
103         {0x00000003, 0x4f008081, 0x00050081, 0x03840253, 0xaf020406,
104          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
105          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
106          "Non-buffered flash (256kB)"},
107 };
108
109 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
110
111 static u32
112 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
113 {
114         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
115         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
116 }
117
118 static void
119 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
120 {
121         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
122         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
123 }
124
125 static void
126 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
127 {
128         offset += cid_addr;
129         REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
130         REG_WR(bp, BNX2_CTX_DATA, val);
131 }
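/* Added note: the three helpers above provide indirect access to registers
 * and context memory that are not directly reachable through the BAR --
 * program a window/address register first, then read or write the data
 * register. A hedged usage sketch (the offset is caller-chosen):
 *
 *	u32 val;
 *
 *	val = bnx2_reg_rd_ind(bp, offset);
 *	bnx2_reg_wr_ind(bp, offset, val | some_bit);
 *
 * Since the window register is a single shared resource, callers are
 * presumably expected to serialize such read/write pairs themselves.
 */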
132
133 static int
134 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
135 {
136         u32 val1;
137         int i, ret;
138
139         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
140                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
141                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
142
143                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
144                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
145
146                 udelay(40);
147         }
148
149         val1 = (bp->phy_addr << 21) | (reg << 16) |
150                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
151                 BNX2_EMAC_MDIO_COMM_START_BUSY;
152         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
153
154         for (i = 0; i < 50; i++) {
155                 udelay(10);
156
157                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
158                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
159                         udelay(5);
160
161                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
162                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
163
164                         break;
165                 }
166         }
167
168         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
169                 *val = 0x0;
170                 ret = -EBUSY;
171         }
172         else {
173                 *val = val1;
174                 ret = 0;
175         }
176
177         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
178                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
179                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
180
181                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
182                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
183
184                 udelay(40);
185         }
186
187         return ret;
188 }
189
190 static int
191 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
192 {
193         u32 val1;
194         int i, ret;
195
196         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
197                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
198                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
199
200                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
201                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
202
203                 udelay(40);
204         }
205
206         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
207                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
208                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
209         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
210     
211         for (i = 0; i < 50; i++) {
212                 udelay(10);
213
214                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
215                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
216                         udelay(5);
217                         break;
218                 }
219         }
220
221         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
222                 ret = -EBUSY;
223         else
224                 ret = 0;
225
226         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
227                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
228                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
229
230                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
231                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
232
233                 udelay(40);
234         }
235
236         return ret;
237 }
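/* Added note: the two MDIO helpers above implement the usual clause-22
 * access pattern -- temporarily drop hardware auto-polling, issue the
 * COMM command, poll START_BUSY for up to ~500us (50 x udelay(10)), then
 * restore auto-polling. The typical read-modify-write through them, as
 * used in several places below, looks like:
 *
 *	u32 bmcr;
 *
 *	bnx2_read_phy(bp, MII_BMCR, &bmcr);
 *	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
 */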
238
239 static void
240 bnx2_disable_int(struct bnx2 *bp)
241 {
242         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
243                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
244         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
245 }
246
247 static void
248 bnx2_enable_int(struct bnx2 *bp)
249 {
250         u32 val;
251
252         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
253                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
254
255         val = REG_RD(bp, BNX2_HC_COMMAND);
256         REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
257 }
258
259 static void
260 bnx2_disable_int_sync(struct bnx2 *bp)
261 {
262         atomic_inc(&bp->intr_sem);
263         bnx2_disable_int(bp);
264         synchronize_irq(bp->pdev->irq);
265 }
266
267 static void
268 bnx2_netif_stop(struct bnx2 *bp)
269 {
270         bnx2_disable_int_sync(bp);
271         if (netif_running(bp->dev)) {
272                 netif_poll_disable(bp->dev);
273                 netif_tx_disable(bp->dev);
274                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
275         }
276 }
277
278 static void
279 bnx2_netif_start(struct bnx2 *bp)
280 {
281         if (atomic_dec_and_test(&bp->intr_sem)) {
282                 if (netif_running(bp->dev)) {
283                         netif_wake_queue(bp->dev);
284                         netif_poll_enable(bp->dev);
285                         bnx2_enable_int(bp);
286                 }
287         }
288 }
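/* Added note: bnx2_netif_stop()/bnx2_netif_start() are intended to bracket
 * reconfiguration. intr_sem is incremented on stop, and only when the
 * matching atomic_dec_and_test() reaches zero does start re-enable the
 * queue, polling and interrupts, so nested stop/start pairs only take
 * effect at the outermost start. Illustrative use:
 *
 *	bnx2_netif_stop(bp);
 *	... change rings, MTU, coalescing parameters ...
 *	bnx2_netif_start(bp);
 */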
289
290 static void
291 bnx2_free_mem(struct bnx2 *bp)
292 {
293         if (bp->stats_blk) {
294                 pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
295                                     bp->stats_blk, bp->stats_blk_mapping);
296                 bp->stats_blk = NULL;
297         }
298         if (bp->status_blk) {
299                 pci_free_consistent(bp->pdev, sizeof(struct status_block),
300                                     bp->status_blk, bp->status_blk_mapping);
301                 bp->status_blk = NULL;
302         }
303         if (bp->tx_desc_ring) {
304                 pci_free_consistent(bp->pdev,
305                                     sizeof(struct tx_bd) * TX_DESC_CNT,
306                                     bp->tx_desc_ring, bp->tx_desc_mapping);
307                 bp->tx_desc_ring = NULL;
308         }
309         if (bp->tx_buf_ring) {
310                 kfree(bp->tx_buf_ring);
311                 bp->tx_buf_ring = NULL;
312         }
313         if (bp->rx_desc_ring) {
314                 pci_free_consistent(bp->pdev,
315                                     sizeof(struct rx_bd) * RX_DESC_CNT,
316                                     bp->rx_desc_ring, bp->rx_desc_mapping);
317                 bp->rx_desc_ring = NULL;
318         }
319         if (bp->rx_buf_ring) {
320                 kfree(bp->rx_buf_ring);
321                 bp->rx_buf_ring = NULL;
322         }
323 }
324
325 static int
326 bnx2_alloc_mem(struct bnx2 *bp)
327 {
328         bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
329                                      GFP_KERNEL);
330         if (bp->tx_buf_ring == NULL)
331                 return -ENOMEM;
332
333         memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
334         bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
335                                                 sizeof(struct tx_bd) *
336                                                 TX_DESC_CNT,
337                                                 &bp->tx_desc_mapping);
338         if (bp->tx_desc_ring == NULL)
339                 goto alloc_mem_err;
340
341         bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT,
342                                      GFP_KERNEL);
343         if (bp->rx_buf_ring == NULL)
344                 goto alloc_mem_err;
345
346         memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT);
347         bp->rx_desc_ring = pci_alloc_consistent(bp->pdev,
348                                                 sizeof(struct rx_bd) *
349                                                 RX_DESC_CNT,
350                                                 &bp->rx_desc_mapping);
351         if (bp->rx_desc_ring == NULL)
352                 goto alloc_mem_err;
353
354         bp->status_blk = pci_alloc_consistent(bp->pdev,
355                                               sizeof(struct status_block),
356                                               &bp->status_blk_mapping);
357         if (bp->status_blk == NULL)
358                 goto alloc_mem_err;
359
360         memset(bp->status_blk, 0, sizeof(struct status_block));
361
362         bp->stats_blk = pci_alloc_consistent(bp->pdev,
363                                              sizeof(struct statistics_block),
364                                              &bp->stats_blk_mapping);
365         if (bp->stats_blk == NULL)
366                 goto alloc_mem_err;
367
368         memset(bp->stats_blk, 0, sizeof(struct statistics_block));
369
370         return 0;
371
372 alloc_mem_err:
373         bnx2_free_mem(bp);
374         return -ENOMEM;
375 }
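/* Added note: bnx2_alloc_mem() uses a single error label -- any failed
 * allocation jumps to alloc_mem_err and lets bnx2_free_mem() clean up.
 * That works because bnx2_free_mem() NULL-checks every pointer and resets
 * it after freeing, so it releases only what was actually allocated and
 * can presumably also serve the normal teardown path.
 */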
376
377 static void
378 bnx2_report_link(struct bnx2 *bp)
379 {
380         if (bp->link_up) {
381                 netif_carrier_on(bp->dev);
382                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
383
384                 printk("%d Mbps ", bp->line_speed);
385
386                 if (bp->duplex == DUPLEX_FULL)
387                         printk("full duplex");
388                 else
389                         printk("half duplex");
390
391                 if (bp->flow_ctrl) {
392                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
393                                 printk(", receive ");
394                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
395                                         printk("& transmit ");
396                         }
397                         else {
398                                 printk(", transmit ");
399                         }
400                         printk("flow control ON");
401                 }
402                 printk("\n");
403         }
404         else {
405                 netif_carrier_off(bp->dev);
406                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
407         }
408 }
409
410 static void
411 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
412 {
413         u32 local_adv, remote_adv;
414
415         bp->flow_ctrl = 0;
416         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != 
417                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
418
419                 if (bp->duplex == DUPLEX_FULL) {
420                         bp->flow_ctrl = bp->req_flow_ctrl;
421                 }
422                 return;
423         }
424
425         if (bp->duplex != DUPLEX_FULL) {
426                 return;
427         }
428
429         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
430         bnx2_read_phy(bp, MII_LPA, &remote_adv);
431
432         if (bp->phy_flags & PHY_SERDES_FLAG) {
433                 u32 new_local_adv = 0;
434                 u32 new_remote_adv = 0;
435
436                 if (local_adv & ADVERTISE_1000XPAUSE)
437                         new_local_adv |= ADVERTISE_PAUSE_CAP;
438                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
439                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
440                 if (remote_adv & ADVERTISE_1000XPAUSE)
441                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
442                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
443                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
444
445                 local_adv = new_local_adv;
446                 remote_adv = new_remote_adv;
447         }
448
449         /* See Table 28B-3 of 802.3ab-1999 spec. */
450         if (local_adv & ADVERTISE_PAUSE_CAP) {
451                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
452                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
453                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
454                         }
455                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
456                                 bp->flow_ctrl = FLOW_CTRL_RX;
457                         }
458                 }
459                 else {
460                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
461                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
462                         }
463                 }
464         }
465         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
466                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
467                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
468
469                         bp->flow_ctrl = FLOW_CTRL_TX;
470                 }
471         }
472 }
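/* Added summary of the pause resolution coded above (802.3 Table 28B-3),
 * applied only when both speed and flow control are autonegotiated and the
 * link is full duplex:
 *
 *	local PAUSE  local ASYM  partner PAUSE  partner ASYM  =>  flow_ctrl
 *	     1           x             1             x            TX | RX
 *	     1           1             0             1            RX
 *	     0           1             1             1            TX
 *	  anything else                                           none
 */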
473
474 static int
475 bnx2_serdes_linkup(struct bnx2 *bp)
476 {
477         u32 bmcr, local_adv, remote_adv, common;
478
479         bp->link_up = 1;
480         bp->line_speed = SPEED_1000;
481
482         bnx2_read_phy(bp, MII_BMCR, &bmcr);
483         if (bmcr & BMCR_FULLDPLX) {
484                 bp->duplex = DUPLEX_FULL;
485         }
486         else {
487                 bp->duplex = DUPLEX_HALF;
488         }
489
490         if (!(bmcr & BMCR_ANENABLE)) {
491                 return 0;
492         }
493
494         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
495         bnx2_read_phy(bp, MII_LPA, &remote_adv);
496
497         common = local_adv & remote_adv;
498         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
499
500                 if (common & ADVERTISE_1000XFULL) {
501                         bp->duplex = DUPLEX_FULL;
502                 }
503                 else {
504                         bp->duplex = DUPLEX_HALF;
505                 }
506         }
507
508         return 0;
509 }
510
511 static int
512 bnx2_copper_linkup(struct bnx2 *bp)
513 {
514         u32 bmcr;
515
516         bnx2_read_phy(bp, MII_BMCR, &bmcr);
517         if (bmcr & BMCR_ANENABLE) {
518                 u32 local_adv, remote_adv, common;
519
520                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
521                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
522
523                 common = local_adv & (remote_adv >> 2);
524                 if (common & ADVERTISE_1000FULL) {
525                         bp->line_speed = SPEED_1000;
526                         bp->duplex = DUPLEX_FULL;
527                 }
528                 else if (common & ADVERTISE_1000HALF) {
529                         bp->line_speed = SPEED_1000;
530                         bp->duplex = DUPLEX_HALF;
531                 }
532                 else {
533                         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
534                         bnx2_read_phy(bp, MII_LPA, &remote_adv);
535
536                         common = local_adv & remote_adv;
537                         if (common & ADVERTISE_100FULL) {
538                                 bp->line_speed = SPEED_100;
539                                 bp->duplex = DUPLEX_FULL;
540                         }
541                         else if (common & ADVERTISE_100HALF) {
542                                 bp->line_speed = SPEED_100;
543                                 bp->duplex = DUPLEX_HALF;
544                         }
545                         else if (common & ADVERTISE_10FULL) {
546                                 bp->line_speed = SPEED_10;
547                                 bp->duplex = DUPLEX_FULL;
548                         }
549                         else if (common & ADVERTISE_10HALF) {
550                                 bp->line_speed = SPEED_10;
551                                 bp->duplex = DUPLEX_HALF;
552                         }
553                         else {
554                                 bp->line_speed = 0;
555                                 bp->link_up = 0;
556                         }
557                 }
558         }
559         else {
560                 if (bmcr & BMCR_SPEED100) {
561                         bp->line_speed = SPEED_100;
562                 }
563                 else {
564                         bp->line_speed = SPEED_10;
565                 }
566                 if (bmcr & BMCR_FULLDPLX) {
567                         bp->duplex = DUPLEX_FULL;
568                 }
569                 else {
570                         bp->duplex = DUPLEX_HALF;
571                 }
572         }
573
574         return 0;
575 }
576
577 static int
578 bnx2_set_mac_link(struct bnx2 *bp)
579 {
580         u32 val;
581
582         REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
583         if (bp->link_up && (bp->line_speed == SPEED_1000) &&
584                 (bp->duplex == DUPLEX_HALF)) {
585                 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
586         }
587
588         /* Configure the EMAC mode register. */
589         val = REG_RD(bp, BNX2_EMAC_MODE);
590
591         val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
592                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK);
593
594         if (bp->link_up) {
595                 if (bp->line_speed != SPEED_1000)
596                         val |= BNX2_EMAC_MODE_PORT_MII;
597                 else
598                         val |= BNX2_EMAC_MODE_PORT_GMII;
599         }
600         else {
601                 val |= BNX2_EMAC_MODE_PORT_GMII;
602         }
603
604         /* Set the MAC to operate in the appropriate duplex mode. */
605         if (bp->duplex == DUPLEX_HALF)
606                 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
607         REG_WR(bp, BNX2_EMAC_MODE, val);
608
609         /* Enable/disable rx PAUSE. */
610         bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
611
612         if (bp->flow_ctrl & FLOW_CTRL_RX)
613                 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
614         REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
615
616         /* Enable/disable tx PAUSE. */
617         val = REG_RD(bp, BNX2_EMAC_TX_MODE);
618         val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
619
620         if (bp->flow_ctrl & FLOW_CTRL_TX)
621                 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
622         REG_WR(bp, BNX2_EMAC_TX_MODE, val);
623
624         /* Acknowledge the interrupt. */
625         REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
626
627         return 0;
628 }
629
630 static int
631 bnx2_set_link(struct bnx2 *bp)
632 {
633         u32 bmsr;
634         u8 link_up;
635
636         if (bp->loopback == MAC_LOOPBACK) {
637                 bp->link_up = 1;
638                 return 0;
639         }
640
641         link_up = bp->link_up;
642
643         bnx2_read_phy(bp, MII_BMSR, &bmsr);
644         bnx2_read_phy(bp, MII_BMSR, &bmsr);
645
646         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
647             (CHIP_NUM(bp) == CHIP_NUM_5706)) {
648                 u32 val;
649
650                 val = REG_RD(bp, BNX2_EMAC_STATUS);
651                 if (val & BNX2_EMAC_STATUS_LINK)
652                         bmsr |= BMSR_LSTATUS;
653                 else
654                         bmsr &= ~BMSR_LSTATUS;
655         }
656
657         if (bmsr & BMSR_LSTATUS) {
658                 bp->link_up = 1;
659
660                 if (bp->phy_flags & PHY_SERDES_FLAG) {
661                         bnx2_serdes_linkup(bp);
662                 }
663                 else {
664                         bnx2_copper_linkup(bp);
665                 }
666                 bnx2_resolve_flow_ctrl(bp);
667         }
668         else {
669                 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
670                         (bp->autoneg & AUTONEG_SPEED)) {
671
672                         u32 bmcr;
673
674                         bnx2_read_phy(bp, MII_BMCR, &bmcr);
675                         if (!(bmcr & BMCR_ANENABLE)) {
676                                 bnx2_write_phy(bp, MII_BMCR, bmcr |
677                                         BMCR_ANENABLE);
678                         }
679                 }
680                 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
681                 bp->link_up = 0;
682         }
683
684         if (bp->link_up != link_up) {
685                 bnx2_report_link(bp);
686         }
687
688         bnx2_set_mac_link(bp);
689
690         return 0;
691 }
692
693 static int
694 bnx2_reset_phy(struct bnx2 *bp)
695 {
696         int i;
697         u32 reg;
698
699         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
700
701 #define PHY_RESET_MAX_WAIT 100
702         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
703                 udelay(10);
704
705                 bnx2_read_phy(bp, MII_BMCR, &reg);
706                 if (!(reg & BMCR_RESET)) {
707                         udelay(20);
708                         break;
709                 }
710         }
711         if (i == PHY_RESET_MAX_WAIT) {
712                 return -EBUSY;
713         }
714         return 0;
715 }
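/* Added note: BMCR_RESET is self-clearing, so bnx2_reset_phy() simply polls
 * for it to drop. PHY_RESET_MAX_WAIT iterations of udelay(10) give the PHY
 * roughly 1ms (100 * 10us) before the reset is declared stuck and -EBUSY
 * is returned.
 */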
716
717 static u32
718 bnx2_phy_get_pause_adv(struct bnx2 *bp)
719 {
720         u32 adv = 0;
721
722         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
723                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
724
725                 if (bp->phy_flags & PHY_SERDES_FLAG) {
726                         adv = ADVERTISE_1000XPAUSE;
727                 }
728                 else {
729                         adv = ADVERTISE_PAUSE_CAP;
730                 }
731         }
732         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
733                 if (bp->phy_flags & PHY_SERDES_FLAG) {
734                         adv = ADVERTISE_1000XPSE_ASYM;
735                 }
736                 else {
737                         adv = ADVERTISE_PAUSE_ASYM;
738                 }
739         }
740         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
741                 if (bp->phy_flags & PHY_SERDES_FLAG) {
742                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
743                 }
744                 else {
745                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
746                 }
747         }
748         return adv;
749 }
750
751 static int
752 bnx2_setup_serdes_phy(struct bnx2 *bp)
753 {
754         u32 adv, bmcr;
755         u32 new_adv = 0;
756
757         if (!(bp->autoneg & AUTONEG_SPEED)) {
758                 u32 new_bmcr;
759
760                 bnx2_read_phy(bp, MII_BMCR, &bmcr);
761                 new_bmcr = bmcr & ~BMCR_ANENABLE;
762                 new_bmcr |= BMCR_SPEED1000;
763                 if (bp->req_duplex == DUPLEX_FULL) {
764                         new_bmcr |= BMCR_FULLDPLX;
765                 }
766                 else {
767                         new_bmcr &= ~BMCR_FULLDPLX;
768                 }
769                 if (new_bmcr != bmcr) {
770                         /* Force a link-down event visible to the link partner */
771                         if (bp->link_up) {
772                                 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
773                                 adv &= ~(ADVERTISE_1000XFULL |
774                                         ADVERTISE_1000XHALF);
775                                 bnx2_write_phy(bp, MII_ADVERTISE, adv);
776                                 bnx2_write_phy(bp, MII_BMCR, bmcr |
777                                         BMCR_ANRESTART | BMCR_ANENABLE);
778
779                                 bp->link_up = 0;
780                                 netif_carrier_off(bp->dev);
781                         }
782                         bnx2_write_phy(bp, MII_BMCR, new_bmcr);
783                 }
784                 return 0;
785         }
786
787         if (bp->advertising & ADVERTISED_1000baseT_Full)
788                 new_adv |= ADVERTISE_1000XFULL;
789
790         new_adv |= bnx2_phy_get_pause_adv(bp);
791
792         bnx2_read_phy(bp, MII_ADVERTISE, &adv);
793         bnx2_read_phy(bp, MII_BMCR, &bmcr);
794
795         bp->serdes_an_pending = 0;
796         if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
797                 /* Force a link-down event visible to the link partner */
798                 if (bp->link_up) {
799                         int i;
800
801                         bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
802                         for (i = 0; i < 110; i++) {
803                                 udelay(100);
804                         }
805                 }
806
807                 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
808                 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
809                         BMCR_ANENABLE);
810                 bp->serdes_an_pending = SERDES_AN_TIMEOUT / bp->timer_interval;
811         }
812
813         return 0;
814 }
815
816 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
817         (ADVERTISED_1000baseT_Full)
818
819 #define ETHTOOL_ALL_COPPER_SPEED                                        \
820         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
821         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
822         ADVERTISED_1000baseT_Full)
823
824 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
825         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
826         
827 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
828
829 static int
830 bnx2_setup_copper_phy(struct bnx2 *bp)
831 {
832         u32 bmcr;
833         u32 new_bmcr;
834
835         bnx2_read_phy(bp, MII_BMCR, &bmcr);
836
837         if (bp->autoneg & AUTONEG_SPEED) {
838                 u32 adv_reg, adv1000_reg;
839                 u32 new_adv_reg = 0;
840                 u32 new_adv1000_reg = 0;
841
842                 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
843                 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
844                         ADVERTISE_PAUSE_ASYM);
845
846                 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
847                 adv1000_reg &= PHY_ALL_1000_SPEED;
848
849                 if (bp->advertising & ADVERTISED_10baseT_Half)
850                         new_adv_reg |= ADVERTISE_10HALF;
851                 if (bp->advertising & ADVERTISED_10baseT_Full)
852                         new_adv_reg |= ADVERTISE_10FULL;
853                 if (bp->advertising & ADVERTISED_100baseT_Half)
854                         new_adv_reg |= ADVERTISE_100HALF;
855                 if (bp->advertising & ADVERTISED_100baseT_Full)
856                         new_adv_reg |= ADVERTISE_100FULL;
857                 if (bp->advertising & ADVERTISED_1000baseT_Full)
858                         new_adv1000_reg |= ADVERTISE_1000FULL;
859                 
860                 new_adv_reg |= ADVERTISE_CSMA;
861
862                 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
863
864                 if ((adv1000_reg != new_adv1000_reg) ||
865                         (adv_reg != new_adv_reg) ||
866                         ((bmcr & BMCR_ANENABLE) == 0)) {
867
868                         bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
869                         bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
870                         bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
871                                 BMCR_ANENABLE);
872                 }
873                 else if (bp->link_up) {
874                         /* Flow ctrl may have changed from auto to forced,
875                          * or vice-versa. */
876
877                         bnx2_resolve_flow_ctrl(bp);
878                         bnx2_set_mac_link(bp);
879                 }
880                 return 0;
881         }
882
883         new_bmcr = 0;
884         if (bp->req_line_speed == SPEED_100) {
885                 new_bmcr |= BMCR_SPEED100;
886         }
887         if (bp->req_duplex == DUPLEX_FULL) {
888                 new_bmcr |= BMCR_FULLDPLX;
889         }
890         if (new_bmcr != bmcr) {
891                 u32 bmsr;
892                 int i = 0;
893
894                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
895                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
896                 
897                 if (bmsr & BMSR_LSTATUS) {
898                         /* Force link down */
899                         bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
900                         do {
901                                 udelay(100);
902                                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
903                                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
904                                 i++;
905                         } while ((bmsr & BMSR_LSTATUS) && (i < 620));
906                 }
907
908                 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
909
910                 /* Normally, the new speed is set up after the link has
911                  * gone down and come back up. In some cases, the link will
912                  * not go down, so we need to set up the new speed here.
913                  */
914                 if (bmsr & BMSR_LSTATUS) {
915                         bp->line_speed = bp->req_line_speed;
916                         bp->duplex = bp->req_duplex;
917                         bnx2_resolve_flow_ctrl(bp);
918                         bnx2_set_mac_link(bp);
919                 }
920         }
921         return 0;
922 }
923
924 static int
925 bnx2_setup_phy(struct bnx2 *bp)
926 {
927         if (bp->loopback == MAC_LOOPBACK)
928                 return 0;
929
930         if (bp->phy_flags & PHY_SERDES_FLAG) {
931                 return (bnx2_setup_serdes_phy(bp));
932         }
933         else {
934                 return (bnx2_setup_copper_phy(bp));
935         }
936 }
937
938 static int
939 bnx2_init_serdes_phy(struct bnx2 *bp)
940 {
941         bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
942
943         if (CHIP_NUM(bp) == CHIP_NUM_5706) {
944                 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
945         }
946
947         if (bp->dev->mtu > 1500) {
948                 u32 val;
949
950                 /* Set extended packet length bit */
951                 bnx2_write_phy(bp, 0x18, 0x7);
952                 bnx2_read_phy(bp, 0x18, &val);
953                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
954
955                 bnx2_write_phy(bp, 0x1c, 0x6c00);
956                 bnx2_read_phy(bp, 0x1c, &val);
957                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
958         }
959         else {
960                 u32 val;
961
962                 bnx2_write_phy(bp, 0x18, 0x7);
963                 bnx2_read_phy(bp, 0x18, &val);
964                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
965
966                 bnx2_write_phy(bp, 0x1c, 0x6c00);
967                 bnx2_read_phy(bp, 0x1c, &val);
968                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
969         }
970
971         return 0;
972 }
973
974 static int
975 bnx2_init_copper_phy(struct bnx2 *bp)
976 {
977         bp->phy_flags |= PHY_CRC_FIX_FLAG;
978
979         if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
980                 bnx2_write_phy(bp, 0x18, 0x0c00);
981                 bnx2_write_phy(bp, 0x17, 0x000a);
982                 bnx2_write_phy(bp, 0x15, 0x310b);
983                 bnx2_write_phy(bp, 0x17, 0x201f);
984                 bnx2_write_phy(bp, 0x15, 0x9506);
985                 bnx2_write_phy(bp, 0x17, 0x401f);
986                 bnx2_write_phy(bp, 0x15, 0x14e2);
987                 bnx2_write_phy(bp, 0x18, 0x0400);
988         }
989
990         if (bp->dev->mtu > 1500) {
991                 u32 val;
992
993                 /* Set extended packet length bit */
994                 bnx2_write_phy(bp, 0x18, 0x7);
995                 bnx2_read_phy(bp, 0x18, &val);
996                 bnx2_write_phy(bp, 0x18, val | 0x4000);
997
998                 bnx2_read_phy(bp, 0x10, &val);
999                 bnx2_write_phy(bp, 0x10, val | 0x1);
1000         }
1001         else {
1002                 u32 val;
1003
1004                 bnx2_write_phy(bp, 0x18, 0x7);
1005                 bnx2_read_phy(bp, 0x18, &val);
1006                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1007
1008                 bnx2_read_phy(bp, 0x10, &val);
1009                 bnx2_write_phy(bp, 0x10, val & ~0x1);
1010         }
1011
1012         return 0;
1013 }
1014
1015
1016 static int
1017 bnx2_init_phy(struct bnx2 *bp)
1018 {
1019         u32 val;
1020         int rc = 0;
1021
1022         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1023         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1024
1025         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1026
1027         bnx2_reset_phy(bp);
1028
1029         bnx2_read_phy(bp, MII_PHYSID1, &val);
1030         bp->phy_id = val << 16;
1031         bnx2_read_phy(bp, MII_PHYSID2, &val);
1032         bp->phy_id |= val & 0xffff;
1033
1034         if (bp->phy_flags & PHY_SERDES_FLAG) {
1035                 rc = bnx2_init_serdes_phy(bp);
1036         }
1037         else {
1038                 rc = bnx2_init_copper_phy(bp);
1039         }
1040
1041         bnx2_setup_phy(bp);
1042
1043         return rc;
1044 }
1045
1046 static int
1047 bnx2_set_mac_loopback(struct bnx2 *bp)
1048 {
1049         u32 mac_mode;
1050
1051         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1052         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1053         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1054         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1055         bp->link_up = 1;
1056         return 0;
1057 }
1058
1059 static int
1060 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data)
1061 {
1062         int i;
1063         u32 val;
1064
1065         if (bp->fw_timed_out)
1066                 return -EBUSY;
1067
1068         bp->fw_wr_seq++;
1069         msg_data |= bp->fw_wr_seq;
1070
1071         REG_WR_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_DRV_MB, msg_data);
1072
1073         /* wait for an acknowledgement. */
1074         for (i = 0; i < (FW_ACK_TIME_OUT_MS * 1000)/5; i++) {
1075                 udelay(5);
1076
1077                 val = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_FW_MB);
1078
1079                 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1080                         break;
1081         }
1082
1083         /* If we timed out, inform the firmware that this is the case. */
1084         if (((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) &&
1085                 ((msg_data & BNX2_DRV_MSG_DATA) != BNX2_DRV_MSG_DATA_WAIT0)) {
1086
1087                 msg_data &= ~BNX2_DRV_MSG_CODE;
1088                 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1089
1090                 REG_WR_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_DRV_MB, msg_data);
1091
1092                 bp->fw_timed_out = 1;
1093
1094                 return -EBUSY;
1095         }
1096
1097         return 0;
1098 }
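/* Added note: bnx2_fw_sync() is the driver<->bootcode mailbox handshake --
 * write a command plus an incrementing sequence number to BNX2_DRV_MB,
 * then poll BNX2_FW_MB until the firmware echoes the sequence back or the
 * FW_ACK_TIME_OUT_MS budget runs out (in which case the firmware is told
 * about the timeout and fw_timed_out latches). Illustrative call, where
 * drv_msg_code stands for one of the BNX2_DRV_MSG_CODE_* values defined
 * in bnx2.h:
 *
 *	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | drv_msg_code);
 */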
1099
1100 static void
1101 bnx2_init_context(struct bnx2 *bp)
1102 {
1103         u32 vcid;
1104
1105         vcid = 96;
1106         while (vcid) {
1107                 u32 vcid_addr, pcid_addr, offset;
1108
1109                 vcid--;
1110
1111                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1112                         u32 new_vcid;
1113
1114                         vcid_addr = GET_PCID_ADDR(vcid);
1115                         if (vcid & 0x8) {
1116                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1117                         }
1118                         else {
1119                                 new_vcid = vcid;
1120                         }
1121                         pcid_addr = GET_PCID_ADDR(new_vcid);
1122                 }
1123                 else {
1124                         vcid_addr = GET_CID_ADDR(vcid);
1125                         pcid_addr = vcid_addr;
1126                 }
1127
1128                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1129                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1130
1131                 /* Zero out the context. */
1132                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1133                         CTX_WR(bp, 0x00, offset, 0);
1134                 }
1135
1136                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1137                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1138         }
1139 }
1140
1141 static int
1142 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1143 {
1144         u16 *good_mbuf;
1145         u32 good_mbuf_cnt;
1146         u32 val;
1147
1148         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1149         if (good_mbuf == NULL) {
1150                 printk(KERN_ERR PFX "Failed to allocate memory in "
1151                                     "bnx2_alloc_bad_rbuf\n");
1152                 return -ENOMEM;
1153         }
1154
1155         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1156                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1157
1158         good_mbuf_cnt = 0;
1159
1160         /* Allocate a bunch of mbufs and save the good ones in an array. */
1161         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1162         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1163                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1164
1165                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1166
1167                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1168
1169                 /* The addresses with Bit 9 set are bad memory blocks. */
1170                 if (!(val & (1 << 9))) {
1171                         good_mbuf[good_mbuf_cnt] = (u16) val;
1172                         good_mbuf_cnt++;
1173                 }
1174
1175                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1176         }
1177
1178         /* Free the good ones back to the mbuf pool thus discarding
1179          * all the bad ones. */
1180         while (good_mbuf_cnt) {
1181                 good_mbuf_cnt--;
1182
1183                 val = good_mbuf[good_mbuf_cnt];
1184                 val = (val << 9) | val | 1;
1185
1186                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1187         }
1188         kfree(good_mbuf);
1189         return 0;
1190 }
1191
1192 static void
1193 bnx2_set_mac_addr(struct bnx2 *bp) 
1194 {
1195         u32 val;
1196         u8 *mac_addr = bp->dev->dev_addr;
1197
1198         val = (mac_addr[0] << 8) | mac_addr[1];
1199
1200         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1201
1202         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 
1203                 (mac_addr[4] << 8) | mac_addr[5];
1204
1205         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1206 }
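/* Added note: the station address is split across two match registers --
 * MAC_MATCH0 carries the two most significant bytes, MAC_MATCH1 the
 * remaining four. For 00:10:18:aa:bb:cc the writes work out to
 * (illustrative):
 *
 *	BNX2_EMAC_MAC_MATCH0 = 0x00000010
 *	BNX2_EMAC_MAC_MATCH1 = 0x18aabbcc
 */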
1207
1208 static inline int
1209 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1210 {
1211         struct sk_buff *skb;
1212         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1213         dma_addr_t mapping;
1214         struct rx_bd *rxbd = &bp->rx_desc_ring[index];
1215         unsigned long align;
1216
1217         skb = dev_alloc_skb(bp->rx_buf_size);
1218         if (skb == NULL) {
1219                 return -ENOMEM;
1220         }
1221
1222         if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1223                 skb_reserve(skb, 8 - align);
1224         }
1225
1226         skb->dev = bp->dev;
1227         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1228                 PCI_DMA_FROMDEVICE);
1229
1230         rx_buf->skb = skb;
1231         pci_unmap_addr_set(rx_buf, mapping, mapping);
1232
1233         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1234         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1235
1236         bp->rx_prod_bseq += bp->rx_buf_use_size;
1237
1238         return 0;
1239 }
1240
1241 static void
1242 bnx2_phy_int(struct bnx2 *bp)
1243 {
1244         u32 new_link_state, old_link_state;
1245
1246         new_link_state = bp->status_blk->status_attn_bits &
1247                 STATUS_ATTN_BITS_LINK_STATE;
1248         old_link_state = bp->status_blk->status_attn_bits_ack &
1249                 STATUS_ATTN_BITS_LINK_STATE;
1250         if (new_link_state != old_link_state) {
1251                 if (new_link_state) {
1252                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1253                                 STATUS_ATTN_BITS_LINK_STATE);
1254                 }
1255                 else {
1256                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1257                                 STATUS_ATTN_BITS_LINK_STATE);
1258                 }
1259                 bnx2_set_link(bp);
1260         }
1261 }
1262
1263 static void
1264 bnx2_tx_int(struct bnx2 *bp)
1265 {
1266         u16 hw_cons, sw_cons, sw_ring_cons;
1267         int tx_free_bd = 0;
1268
1269         hw_cons = bp->status_blk->status_tx_quick_consumer_index0;
1270         if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1271                 hw_cons++;
1272         }
1273         sw_cons = bp->tx_cons;
1274
1275         while (sw_cons != hw_cons) {
1276                 struct sw_bd *tx_buf;
1277                 struct sk_buff *skb;
1278                 int i, last;
1279
1280                 sw_ring_cons = TX_RING_IDX(sw_cons);
1281
1282                 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1283                 skb = tx_buf->skb;
1284 #ifdef BCM_TSO 
1285                 /* partial BD completions possible with TSO packets */
1286                 if (skb_shinfo(skb)->tso_size) {
1287                         u16 last_idx, last_ring_idx;
1288
1289                         last_idx = sw_cons +
1290                                 skb_shinfo(skb)->nr_frags + 1;
1291                         last_ring_idx = sw_ring_cons +
1292                                 skb_shinfo(skb)->nr_frags + 1;
1293                         if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1294                                 last_idx++;
1295                         }
1296                         if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1297                                 break;
1298                         }
1299                 }
1300 #endif
1301                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1302                         skb_headlen(skb), PCI_DMA_TODEVICE);
1303
1304                 tx_buf->skb = NULL;
1305                 last = skb_shinfo(skb)->nr_frags;
1306
1307                 for (i = 0; i < last; i++) {
1308                         sw_cons = NEXT_TX_BD(sw_cons);
1309
1310                         pci_unmap_page(bp->pdev,
1311                                 pci_unmap_addr(
1312                                         &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1313                                         mapping),
1314                                 skb_shinfo(skb)->frags[i].size,
1315                                 PCI_DMA_TODEVICE);
1316                 }
1317
1318                 sw_cons = NEXT_TX_BD(sw_cons);
1319
1320                 tx_free_bd += last + 1;
1321
1322                 dev_kfree_skb_irq(skb);
1323
1324                 hw_cons = bp->status_blk->status_tx_quick_consumer_index0;
1325                 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1326                         hw_cons++;
1327                 }
1328         }
1329
1330         atomic_add(tx_free_bd, &bp->tx_avail_bd);
1331
1332         if (unlikely(netif_queue_stopped(bp->dev))) {
1333                 unsigned long flags;
1334
1335                 spin_lock_irqsave(&bp->tx_lock, flags);
1336                 if ((netif_queue_stopped(bp->dev)) &&
1337                         (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)) {
1338
1339                         netif_wake_queue(bp->dev);
1340                 }
1341                 spin_unlock_irqrestore(&bp->tx_lock, flags);
1342         }
1343
1344         bp->tx_cons = sw_cons;
1345
1346 }
1347
1348 static inline void
1349 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1350         u16 cons, u16 prod)
1351 {
1352         struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons];
1353         struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod];
1354         struct rx_bd *cons_bd = &bp->rx_desc_ring[cons];
1355         struct rx_bd *prod_bd = &bp->rx_desc_ring[prod];
1356
1357         pci_dma_sync_single_for_device(bp->pdev,
1358                 pci_unmap_addr(cons_rx_buf, mapping),
1359                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1360
1361         prod_rx_buf->skb = cons_rx_buf->skb;
1362         pci_unmap_addr_set(prod_rx_buf, mapping,
1363                         pci_unmap_addr(cons_rx_buf, mapping));
1364
1365         memcpy(prod_bd, cons_bd, 8);
1366
1367         bp->rx_prod_bseq += bp->rx_buf_use_size;
1368
1369 }
1370
1371 static int
1372 bnx2_rx_int(struct bnx2 *bp, int budget)
1373 {
1374         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1375         struct l2_fhdr *rx_hdr;
1376         int rx_pkt = 0;
1377
1378         hw_cons = bp->status_blk->status_rx_quick_consumer_index0;
1379         if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1380                 hw_cons++;
1381         }
1382         sw_cons = bp->rx_cons;
1383         sw_prod = bp->rx_prod;
1384
1385         /* Memory barrier necessary as speculative reads of the rx
1386          * buffer can be ahead of the index in the status block
1387          */
1388         rmb();
1389         while (sw_cons != hw_cons) {
1390                 unsigned int len;
1391                 u16 status;
1392                 struct sw_bd *rx_buf;
1393                 struct sk_buff *skb;
1394
1395                 sw_ring_cons = RX_RING_IDX(sw_cons);
1396                 sw_ring_prod = RX_RING_IDX(sw_prod);
1397
1398                 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1399                 skb = rx_buf->skb;
1400                 pci_dma_sync_single_for_cpu(bp->pdev,
1401                         pci_unmap_addr(rx_buf, mapping),
1402                         bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1403
1404                 rx_hdr = (struct l2_fhdr *) skb->data;
1405                 len = rx_hdr->l2_fhdr_pkt_len - 4;
1406
1407                 if (rx_hdr->l2_fhdr_errors &
1408                         (L2_FHDR_ERRORS_BAD_CRC |
1409                         L2_FHDR_ERRORS_PHY_DECODE |
1410                         L2_FHDR_ERRORS_ALIGNMENT |
1411                         L2_FHDR_ERRORS_TOO_SHORT |
1412                         L2_FHDR_ERRORS_GIANT_FRAME)) {
1413
1414                         goto reuse_rx;
1415                 }
1416
1417                 /* Since we don't have a jumbo ring, copy small packets
1418                  * if mtu > 1500
1419                  */
1420                 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1421                         struct sk_buff *new_skb;
1422
1423                         new_skb = dev_alloc_skb(len + 2);
1424                         if (new_skb == NULL)
1425                                 goto reuse_rx;
1426
1427                         /* aligned copy */
1428                         memcpy(new_skb->data,
1429                                 skb->data + bp->rx_offset - 2,
1430                                 len + 2);
1431
1432                         skb_reserve(new_skb, 2);
1433                         skb_put(new_skb, len);
1434                         new_skb->dev = bp->dev;
1435
1436                         bnx2_reuse_rx_skb(bp, skb,
1437                                 sw_ring_cons, sw_ring_prod);
1438
1439                         skb = new_skb;
1440                 }
1441                 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1442                         pci_unmap_single(bp->pdev,
1443                                 pci_unmap_addr(rx_buf, mapping),
1444                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1445
1446                         skb_reserve(skb, bp->rx_offset);
1447                         skb_put(skb, len);
1448                 }
1449                 else {
1450 reuse_rx:
1451                         bnx2_reuse_rx_skb(bp, skb,
1452                                 sw_ring_cons, sw_ring_prod);
1453                         goto next_rx;
1454                 }
1455
1456                 skb->protocol = eth_type_trans(skb, bp->dev);
1457
1458                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1459                         (htons(skb->protocol) != 0x8100)) {
1460
1461                         dev_kfree_skb_irq(skb);
1462                         goto next_rx;
1463
1464                 }
1465
1466                 status = rx_hdr->l2_fhdr_status;
1467                 skb->ip_summed = CHECKSUM_NONE;
1468                 if (bp->rx_csum &&
1469                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1470                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
1471
1472                         u16 cksum = rx_hdr->l2_fhdr_tcp_udp_xsum;
1473
1474                         if (cksum == 0xffff)
1475                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1476                 }
1477
1478 #ifdef BCM_VLAN
1479                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1480                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1481                                 rx_hdr->l2_fhdr_vlan_tag);
1482                 }
1483                 else
1484 #endif
1485                         netif_receive_skb(skb);
1486
1487                 bp->dev->last_rx = jiffies;
1488                 rx_pkt++;
1489
1490 next_rx:
1491                 rx_buf->skb = NULL;
1492
1493                 sw_cons = NEXT_RX_BD(sw_cons);
1494                 sw_prod = NEXT_RX_BD(sw_prod);
1495
1496                 if ((rx_pkt == budget))
1497                         break;
1498         }
1499         bp->rx_cons = sw_cons;
1500         bp->rx_prod = sw_prod;
1501
1502         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1503
1504         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1505
1506         mmiowb();
1507
1508         return rx_pkt;
1509
1510 }
1511
1512 /* MSI ISR - The only difference between this and the INTx ISR
1513  * is that the MSI interrupt is always serviced.
1514  */
1515 static irqreturn_t
1516 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1517 {
1518         struct net_device *dev = dev_instance;
1519         struct bnx2 *bp = dev->priv;
1520
1521         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1522                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1523                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1524
1525         /* Return here if interrupt is disabled. */
1526         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1527                 return IRQ_RETVAL(1);
1528         }
1529
1530         if (netif_rx_schedule_prep(dev)) {
1531                 __netif_rx_schedule(dev);
1532         }
1533
1534         return IRQ_RETVAL(1);
1535 }
1536
1537 static irqreturn_t
1538 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1539 {
1540         struct net_device *dev = dev_instance;
1541         struct bnx2 *bp = dev->priv;
1542
1543         /* When using INTx, it is possible for the interrupt to arrive
1544          * at the CPU before the status block write that was posted ahead
1545          * of it has reached host memory. Reading a register will flush
1546          * the status block. When using MSI, the MSI message will always
1547          * complete after the status block write.
1548          */
1549         if ((bp->status_blk->status_idx == bp->last_status_idx) ||
1550             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1551              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1552                 return IRQ_RETVAL(0);
1553
1554         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1555                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1556                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1557
1558         /* Return here if interrupt is shared and is disabled. */
1559         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1560                 return IRQ_RETVAL(1);
1561         }
1562
1563         if (netif_rx_schedule_prep(dev)) {
1564                 __netif_rx_schedule(dev);
1565         }
1566
1567         return IRQ_RETVAL(1);
1568 }
1569
1570 static int
1571 bnx2_poll(struct net_device *dev, int *budget)
1572 {
1573         struct bnx2 *bp = dev->priv;
1574         int rx_done = 1;
1575
1576         bp->last_status_idx = bp->status_blk->status_idx;
1577
1578         rmb();
1579         if ((bp->status_blk->status_attn_bits &
1580                 STATUS_ATTN_BITS_LINK_STATE) !=
1581                 (bp->status_blk->status_attn_bits_ack &
1582                 STATUS_ATTN_BITS_LINK_STATE)) {
1583
1584                 unsigned long flags;
1585
1586                 spin_lock_irqsave(&bp->phy_lock, flags);
1587                 bnx2_phy_int(bp);
1588                 spin_unlock_irqrestore(&bp->phy_lock, flags);
1589         }
1590
1591         if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) {
1592                 bnx2_tx_int(bp);
1593         }
1594
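        /* Standard NAPI accounting: hand bnx2_rx_int() at most
         * min(*budget, dev->quota) packets' worth of work below and
         * report the poll as incomplete if the whole allowance is used.
         */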
1595         if (bp->status_blk->status_rx_quick_consumer_index0 != bp->rx_cons) {
1596                 int orig_budget = *budget;
1597                 int work_done;
1598
1599                 if (orig_budget > dev->quota)
1600                         orig_budget = dev->quota;
1601                 
1602                 work_done = bnx2_rx_int(bp, orig_budget);
1603                 *budget -= work_done;
1604                 dev->quota -= work_done;
1605                 
1606                 if (work_done >= orig_budget) {
1607                         rx_done = 0;
1608                 }
1609         }
1610         
1611         if (rx_done) {
1612                 netif_rx_complete(dev);
1613                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1614                         BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1615                         bp->last_status_idx);
1616                 return 0;
1617         }
1618
1619         return 1;
1620 }
1621
1622 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
1623  * from set_multicast.
1624  */
1625 static void
1626 bnx2_set_rx_mode(struct net_device *dev)
1627 {
1628         struct bnx2 *bp = dev->priv;
1629         u32 rx_mode, sort_mode;
1630         int i;
1631         unsigned long flags;
1632
1633         spin_lock_irqsave(&bp->phy_lock, flags);
1634
1635         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
1636                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
1637         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
1638 #ifdef BCM_VLAN
1639         if (!bp->vlgrp) {
1640                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1641         }
1642 #else
1643         rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1644 #endif
1645         if (dev->flags & IFF_PROMISC) {
1646                 /* Promiscuous mode. */
1647                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
1648                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
1649         }
1650         else if (dev->flags & IFF_ALLMULTI) {
1651                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
1652                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
1653                                0xffffffff);
1654                 }
1655                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
1656         }
1657         else {
1658                 /* Accept one or more multicast(s). */
1659                 struct dev_mc_list *mclist;
1660                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
1661                 u32 regidx;
1662                 u32 bit;
1663                 u32 crc;
1664
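                /* The chip's multicast filter is a 256-bin hash spread
                 * across the MULTICAST_HASH registers.  The low 8 bits
                 * of the little-endian CRC32 of each address select a
                 * bin: bits 7:5 pick the register, bits 4:0 the bit
                 * within it (e.g. a CRC ending in 0xb3 sets bit 19 of
                 * register 5).
                 */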
1665                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
1666
1667                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1668                      i++, mclist = mclist->next) {
1669
1670                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
1671                         bit = crc & 0xff;
1672                         regidx = (bit & 0xe0) >> 5;
1673                         bit &= 0x1f;
1674                         mc_filter[regidx] |= (1 << bit);
1675                 }
1676
1677                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
1678                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
1679                                mc_filter[i]);
1680                 }
1681
1682                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
1683         }
1684
1685         if (rx_mode != bp->rx_mode) {
1686                 bp->rx_mode = rx_mode;
1687                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
1688         }
1689
1690         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
1691         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
1692         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
1693
1694         spin_unlock_irqrestore(&bp->phy_lock, flags);
1695 }
1696
1697 static void
1698 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
1699         u32 rv2p_proc)
1700 {
1701         int i;
1702         u32 val;
1703
1704
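        /* Each RV2P instruction is 64 bits wide: stage the high and low
         * words in INSTR_HIGH/INSTR_LOW, then latch them into slot i/8
         * of the selected processor via its ADDR_CMD register.
         */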
1705         for (i = 0; i < rv2p_code_len; i += 8) {
1706                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
1707                 rv2p_code++;
1708                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
1709                 rv2p_code++;
1710
1711                 if (rv2p_proc == RV2P_PROC1) {
1712                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
1713                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
1714                 }
1715                 else {
1716                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
1717                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
1718                 }
1719         }
1720
1721         /* Reset the processor; the un-stall is done later. */
1722         if (rv2p_proc == RV2P_PROC1) {
1723                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
1724         }
1725         else {
1726                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
1727         }
1728 }
1729
1730 static void
1731 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
1732 {
1733         u32 offset;
1734         u32 val;
1735
1736         /* Halt the CPU. */
1737         val = REG_RD_IND(bp, cpu_reg->mode);
1738         val |= cpu_reg->mode_value_halt;
1739         REG_WR_IND(bp, cpu_reg->mode, val);
1740         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
1741
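        /* Firmware sections are linked at the CPU's own (MIPS) view of
         * its scratchpad, so each load address is rebased to the
         * host-visible window: spad_base + (section_addr - mips_view_base).
         */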
1742         /* Load the Text area. */
1743         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
1744         if (fw->text) {
1745                 int j;
1746
1747                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
1748                         REG_WR_IND(bp, offset, fw->text[j]);
1749                 }
1750         }
1751
1752         /* Load the Data area. */
1753         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
1754         if (fw->data) {
1755                 int j;
1756
1757                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
1758                         REG_WR_IND(bp, offset, fw->data[j]);
1759                 }
1760         }
1761
1762         /* Load the SBSS area. */
1763         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
1764         if (fw->sbss) {
1765                 int j;
1766
1767                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
1768                         REG_WR_IND(bp, offset, fw->sbss[j]);
1769                 }
1770         }
1771
1772         /* Load the BSS area. */
1773         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
1774         if (fw->bss) {
1775                 int j;
1776
1777                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
1778                         REG_WR_IND(bp, offset, fw->bss[j]);
1779                 }
1780         }
1781
1782         /* Load the Read-Only area. */
1783         offset = cpu_reg->spad_base +
1784                 (fw->rodata_addr - cpu_reg->mips_view_base);
1785         if (fw->rodata) {
1786                 int j;
1787
1788                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
1789                         REG_WR_IND(bp, offset, fw->rodata[j]);
1790                 }
1791         }
1792
1793         /* Clear the pre-fetch instruction. */
1794         REG_WR_IND(bp, cpu_reg->inst, 0);
1795         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
1796
1797         /* Start the CPU. */
1798         val = REG_RD_IND(bp, cpu_reg->mode);
1799         val &= ~cpu_reg->mode_value_halt;
1800         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
1801         REG_WR_IND(bp, cpu_reg->mode, val);
1802 }
1803
1804 static void
1805 bnx2_init_cpus(struct bnx2 *bp)
1806 {
1807         struct cpu_reg cpu_reg;
1808         struct fw_info fw;
1809
1810         /* Initialize the RV2P processor. */
1811         load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
1812         load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
1813
1814         /* Initialize the RX Processor. */
1815         cpu_reg.mode = BNX2_RXP_CPU_MODE;
1816         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
1817         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
1818         cpu_reg.state = BNX2_RXP_CPU_STATE;
1819         cpu_reg.state_value_clear = 0xffffff;
1820         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
1821         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
1822         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
1823         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
1824         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
1825         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
1826         cpu_reg.mips_view_base = 0x8000000;
1827     
1828         fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
1829         fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
1830         fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
1831         fw.start_addr = bnx2_RXP_b06FwStartAddr;
1832
1833         fw.text_addr = bnx2_RXP_b06FwTextAddr;
1834         fw.text_len = bnx2_RXP_b06FwTextLen;
1835         fw.text_index = 0;
1836         fw.text = bnx2_RXP_b06FwText;
1837
1838         fw.data_addr = bnx2_RXP_b06FwDataAddr;
1839         fw.data_len = bnx2_RXP_b06FwDataLen;
1840         fw.data_index = 0;
1841         fw.data = bnx2_RXP_b06FwData;
1842
1843         fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
1844         fw.sbss_len = bnx2_RXP_b06FwSbssLen;
1845         fw.sbss_index = 0;
1846         fw.sbss = bnx2_RXP_b06FwSbss;
1847
1848         fw.bss_addr = bnx2_RXP_b06FwBssAddr;
1849         fw.bss_len = bnx2_RXP_b06FwBssLen;
1850         fw.bss_index = 0;
1851         fw.bss = bnx2_RXP_b06FwBss;
1852
1853         fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
1854         fw.rodata_len = bnx2_RXP_b06FwRodataLen;
1855         fw.rodata_index = 0;
1856         fw.rodata = bnx2_RXP_b06FwRodata;
1857
1858         load_cpu_fw(bp, &cpu_reg, &fw);
1859
1860         /* Initialize the TX Processor. */
1861         cpu_reg.mode = BNX2_TXP_CPU_MODE;
1862         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
1863         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
1864         cpu_reg.state = BNX2_TXP_CPU_STATE;
1865         cpu_reg.state_value_clear = 0xffffff;
1866         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
1867         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
1868         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
1869         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
1870         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
1871         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
1872         cpu_reg.mips_view_base = 0x8000000;
1873     
1874         fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
1875         fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
1876         fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
1877         fw.start_addr = bnx2_TXP_b06FwStartAddr;
1878
1879         fw.text_addr = bnx2_TXP_b06FwTextAddr;
1880         fw.text_len = bnx2_TXP_b06FwTextLen;
1881         fw.text_index = 0;
1882         fw.text = bnx2_TXP_b06FwText;
1883
1884         fw.data_addr = bnx2_TXP_b06FwDataAddr;
1885         fw.data_len = bnx2_TXP_b06FwDataLen;
1886         fw.data_index = 0;
1887         fw.data = bnx2_TXP_b06FwData;
1888
1889         fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
1890         fw.sbss_len = bnx2_TXP_b06FwSbssLen;
1891         fw.sbss_index = 0;
1892         fw.sbss = bnx2_TXP_b06FwSbss;
1893
1894         fw.bss_addr = bnx2_TXP_b06FwBssAddr;
1895         fw.bss_len = bnx2_TXP_b06FwBssLen;
1896         fw.bss_index = 0;
1897         fw.bss = bnx2_TXP_b06FwBss;
1898
1899         fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
1900         fw.rodata_len = bnx2_TXP_b06FwRodataLen;
1901         fw.rodata_index = 0;
1902         fw.rodata = bnx2_TXP_b06FwRodata;
1903
1904         load_cpu_fw(bp, &cpu_reg, &fw);
1905
1906         /* Initialize the TX Patch-up Processor. */
1907         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
1908         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
1909         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
1910         cpu_reg.state = BNX2_TPAT_CPU_STATE;
1911         cpu_reg.state_value_clear = 0xffffff;
1912         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
1913         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
1914         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
1915         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
1916         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
1917         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
1918         cpu_reg.mips_view_base = 0x8000000;
1919     
1920         fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
1921         fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
1922         fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
1923         fw.start_addr = bnx2_TPAT_b06FwStartAddr;
1924
1925         fw.text_addr = bnx2_TPAT_b06FwTextAddr;
1926         fw.text_len = bnx2_TPAT_b06FwTextLen;
1927         fw.text_index = 0;
1928         fw.text = bnx2_TPAT_b06FwText;
1929
1930         fw.data_addr = bnx2_TPAT_b06FwDataAddr;
1931         fw.data_len = bnx2_TPAT_b06FwDataLen;
1932         fw.data_index = 0;
1933         fw.data = bnx2_TPAT_b06FwData;
1934
1935         fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
1936         fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
1937         fw.sbss_index = 0;
1938         fw.sbss = bnx2_TPAT_b06FwSbss;
1939
1940         fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
1941         fw.bss_len = bnx2_TPAT_b06FwBssLen;
1942         fw.bss_index = 0;
1943         fw.bss = bnx2_TPAT_b06FwBss;
1944
1945         fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
1946         fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
1947         fw.rodata_index = 0;
1948         fw.rodata = bnx2_TPAT_b06FwRodata;
1949
1950         load_cpu_fw(bp, &cpu_reg, &fw);
1951
1952         /* Initialize the Completion Processor. */
1953         cpu_reg.mode = BNX2_COM_CPU_MODE;
1954         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
1955         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
1956         cpu_reg.state = BNX2_COM_CPU_STATE;
1957         cpu_reg.state_value_clear = 0xffffff;
1958         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
1959         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
1960         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
1961         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
1962         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
1963         cpu_reg.spad_base = BNX2_COM_SCRATCH;
1964         cpu_reg.mips_view_base = 0x8000000;
1965     
1966         fw.ver_major = bnx2_COM_b06FwReleaseMajor;
1967         fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
1968         fw.ver_fix = bnx2_COM_b06FwReleaseFix;
1969         fw.start_addr = bnx2_COM_b06FwStartAddr;
1970
1971         fw.text_addr = bnx2_COM_b06FwTextAddr;
1972         fw.text_len = bnx2_COM_b06FwTextLen;
1973         fw.text_index = 0;
1974         fw.text = bnx2_COM_b06FwText;
1975
1976         fw.data_addr = bnx2_COM_b06FwDataAddr;
1977         fw.data_len = bnx2_COM_b06FwDataLen;
1978         fw.data_index = 0;
1979         fw.data = bnx2_COM_b06FwData;
1980
1981         fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
1982         fw.sbss_len = bnx2_COM_b06FwSbssLen;
1983         fw.sbss_index = 0;
1984         fw.sbss = bnx2_COM_b06FwSbss;
1985
1986         fw.bss_addr = bnx2_COM_b06FwBssAddr;
1987         fw.bss_len = bnx2_COM_b06FwBssLen;
1988         fw.bss_index = 0;
1989         fw.bss = bnx2_COM_b06FwBss;
1990
1991         fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
1992         fw.rodata_len = bnx2_COM_b06FwRodataLen;
1993         fw.rodata_index = 0;
1994         fw.rodata = bnx2_COM_b06FwRodata;
1995
1996         load_cpu_fw(bp, &cpu_reg, &fw);
1997
1998 }
1999
2000 static int
2001 bnx2_set_power_state(struct bnx2 *bp, int state)
2002 {
2003         u16 pmcsr;
2004
2005         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2006
2007         switch (state) {
2008         case 0: {
2009                 u32 val;
2010
2011                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2012                         (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2013                         PCI_PM_CTRL_PME_STATUS);
2014
2015                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2016                         /* delay required during transition out of D3hot */
2017                         msleep(20);
2018
2019                 val = REG_RD(bp, BNX2_EMAC_MODE);
2020                 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2021                 val &= ~BNX2_EMAC_MODE_MPKT;
2022                 REG_WR(bp, BNX2_EMAC_MODE, val);
2023
2024                 val = REG_RD(bp, BNX2_RPM_CONFIG);
2025                 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2026                 REG_WR(bp, BNX2_RPM_CONFIG, val);
2027                 break;
2028         }
2029         case 3: {
2030                 int i;
2031                 u32 val, wol_msg;
2032
2033                 if (bp->wol) {
2034                         u32 advertising;
2035                         u8 autoneg;
2036
2037                         autoneg = bp->autoneg;
2038                         advertising = bp->advertising;
2039
2040                         bp->autoneg = AUTONEG_SPEED;
2041                         bp->advertising = ADVERTISED_10baseT_Half |
2042                                 ADVERTISED_10baseT_Full |
2043                                 ADVERTISED_100baseT_Half |
2044                                 ADVERTISED_100baseT_Full |
2045                                 ADVERTISED_Autoneg;
2046
2047                         bnx2_setup_copper_phy(bp);
2048
2049                         bp->autoneg = autoneg;
2050                         bp->advertising = advertising;
2051
2052                         bnx2_set_mac_addr(bp);
2053
2054                         val = REG_RD(bp, BNX2_EMAC_MODE);
2055
2056                         /* Enable port mode. */
2057                         val &= ~BNX2_EMAC_MODE_PORT;
2058                         val |= BNX2_EMAC_MODE_PORT_MII |
2059                                BNX2_EMAC_MODE_MPKT_RCVD |
2060                                BNX2_EMAC_MODE_ACPI_RCVD |
2061                                BNX2_EMAC_MODE_FORCE_LINK |
2062                                BNX2_EMAC_MODE_MPKT;
2063
2064                         REG_WR(bp, BNX2_EMAC_MODE, val);
2065
2066                         /* receive all multicast */
2067                         for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2068                                 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2069                                        0xffffffff);
2070                         }
2071                         REG_WR(bp, BNX2_EMAC_RX_MODE,
2072                                BNX2_EMAC_RX_MODE_SORT_MODE);
2073
2074                         val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2075                               BNX2_RPM_SORT_USER0_MC_EN;
2076                         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2077                         REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2078                         REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2079                                BNX2_RPM_SORT_USER0_ENA);
2080
2081                         /* Need to enable EMAC and RPM for WOL. */
2082                         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2083                                BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2084                                BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2085                                BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2086
2087                         val = REG_RD(bp, BNX2_RPM_CONFIG);
2088                         val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2089                         REG_WR(bp, BNX2_RPM_CONFIG, val);
2090
2091                         wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2092                 }
2093                 else {
2094                         wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2095                 }
2096
2097                 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg);
2098
2099                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
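                /* Request D3hot (power state field = 3).  On 5706 A0/A1
                 * the chip is left in D0 unless WOL is enabled.
                 */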
2100                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2101                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2102
2103                         if (bp->wol)
2104                                 pmcsr |= 3;
2105                 }
2106                 else {
2107                         pmcsr |= 3;
2108                 }
2109                 if (bp->wol) {
2110                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2111                 }
2112                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2113                                       pmcsr);
2114
2115                 /* No more memory access after this point until
2116                  * device is brought back to D0.
2117                  */
2118                 udelay(50);
2119                 break;
2120         }
2121         default:
2122                 return -EINVAL;
2123         }
2124         return 0;
2125 }
2126
2127 static int
2128 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2129 {
2130         u32 val;
2131         int j;
2132
2133         /* Request access to the flash interface. */
2134         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2135         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2136                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2137                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2138                         break;
2139
2140                 udelay(5);
2141         }
2142
2143         if (j >= NVRAM_TIMEOUT_COUNT)
2144                 return -EBUSY;
2145
2146         return 0;
2147 }
2148
2149 static int
2150 bnx2_release_nvram_lock(struct bnx2 *bp)
2151 {
2152         int j;
2153         u32 val;
2154
2155         /* Relinquish nvram interface. */
2156         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2157
2158         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2159                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2160                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2161                         break;
2162
2163                 udelay(5);
2164         }
2165
2166         if (j >= NVRAM_TIMEOUT_COUNT)
2167                 return -EBUSY;
2168
2169         return 0;
2170 }
2171
2172
2173 static int
2174 bnx2_enable_nvram_write(struct bnx2 *bp)
2175 {
2176         u32 val;
2177
2178         val = REG_RD(bp, BNX2_MISC_CFG);
2179         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2180
2181         if (!bp->flash_info->buffered) {
2182                 int j;
2183
2184                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2185                 REG_WR(bp, BNX2_NVM_COMMAND,
2186                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2187
2188                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2189                         udelay(5);
2190
2191                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2192                         if (val & BNX2_NVM_COMMAND_DONE)
2193                                 break;
2194                 }
2195
2196                 if (j >= NVRAM_TIMEOUT_COUNT)
2197                         return -EBUSY;
2198         }
2199         return 0;
2200 }
2201
2202 static void
2203 bnx2_disable_nvram_write(struct bnx2 *bp)
2204 {
2205         u32 val;
2206
2207         val = REG_RD(bp, BNX2_MISC_CFG);
2208         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2209 }
2210
2211
2212 static void
2213 bnx2_enable_nvram_access(struct bnx2 *bp)
2214 {
2215         u32 val;
2216
2217         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2218         /* Enable both bits, even on read. */
2219         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2220                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2221 }
2222
2223 static void
2224 bnx2_disable_nvram_access(struct bnx2 *bp)
2225 {
2226         u32 val;
2227
2228         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2229         /* Disable both bits, even after read. */
2230         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2231                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2232                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2233 }
2234
2235 static int
2236 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2237 {
2238         u32 cmd;
2239         int j;
2240
2241         if (bp->flash_info->buffered)
2242                 /* Buffered flash, no erase needed */
2243                 return 0;
2244
2245         /* Build an erase command */
2246         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2247               BNX2_NVM_COMMAND_DOIT;
2248
2249         /* Need to clear DONE bit separately. */
2250         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2251
2252         /* Address of the NVRAM to erase. */
2253         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2254
2255         /* Issue an erase command. */
2256         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2257
2258         /* Wait for completion. */
2259         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2260                 u32 val;
2261
2262                 udelay(5);
2263
2264                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2265                 if (val & BNX2_NVM_COMMAND_DONE)
2266                         break;
2267         }
2268
2269         if (j >= NVRAM_TIMEOUT_COUNT)
2270                 return -EBUSY;
2271
2272         return 0;
2273 }
2274
2275 static int
2276 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2277 {
2278         u32 cmd;
2279         int j;
2280
2281         /* Build the command word. */
2282         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2283
2284         /* Calculate an offset of a buffered flash. */
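        /* Page-addressed parts expect page_number << page_bits plus the
         * byte offset within the page.  For example, assuming the
         * AT45DB011B geometry from flash_table (264-byte pages, 9
         * page-address bits), linear offset 1000 maps to page 3,
         * byte 208: (3 << 9) + 208 = 1744.
         */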
2285         if (bp->flash_info->buffered) {
2286                 offset = ((offset / bp->flash_info->page_size) <<
2287                            bp->flash_info->page_bits) +
2288                           (offset % bp->flash_info->page_size);
2289         }
2290
2291         /* Need to clear DONE bit separately. */
2292         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2293
2294         /* Address of the NVRAM to read from. */
2295         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2296
2297         /* Issue a read command. */
2298         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2299
2300         /* Wait for completion. */
2301         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2302                 u32 val;
2303
2304                 udelay(5);
2305
2306                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2307                 if (val & BNX2_NVM_COMMAND_DONE) {
2308                         val = REG_RD(bp, BNX2_NVM_READ);
2309
2310                         val = be32_to_cpu(val);
2311                         memcpy(ret_val, &val, 4);
2312                         break;
2313                 }
2314         }
2315         if (j >= NVRAM_TIMEOUT_COUNT)
2316                 return -EBUSY;
2317
2318         return 0;
2319 }
2320
2321
2322 static int
2323 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2324 {
2325         u32 cmd, val32;
2326         int j;
2327
2328         /* Build the command word. */
2329         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2330
2331         /* Calculate an offset of a buffered flash. */
2332         if (bp->flash_info->buffered) {
2333                 offset = ((offset / bp->flash_info->page_size) <<
2334                           bp->flash_info->page_bits) +
2335                          (offset % bp->flash_info->page_size);
2336         }
2337
2338         /* Need to clear DONE bit separately. */
2339         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2340
2341         memcpy(&val32, val, 4);
2342         val32 = cpu_to_be32(val32);
2343
2344         /* Write the data. */
2345         REG_WR(bp, BNX2_NVM_WRITE, val32);
2346
2347         /* Address of the NVRAM to write to. */
2348         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2349
2350         /* Issue the write command. */
2351         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2352
2353         /* Wait for completion. */
2354         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2355                 udelay(5);
2356
2357                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2358                         break;
2359         }
2360         if (j >= NVRAM_TIMEOUT_COUNT)
2361                 return -EBUSY;
2362
2363         return 0;
2364 }
2365
2366 static int
2367 bnx2_init_nvram(struct bnx2 *bp)
2368 {
2369         u32 val;
2370         int j, entry_count, rc;
2371         struct flash_spec *flash;
2372
2373         /* Determine the selected interface. */
2374         val = REG_RD(bp, BNX2_NVM_CFG1);
2375
2376         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2377
2378         rc = 0;
2379         if (val & 0x40000000) {
2380
2381                 /* Flash interface has been reconfigured */
2382                 for (j = 0, flash = &flash_table[0]; j < entry_count;
2383                         j++, flash++) {
2384
2385                         if (val == flash->config1) {
2386                                 bp->flash_info = flash;
2387                                 break;
2388                         }
2389                 }
2390         }
2391         else {
2392                 /* Flash interface has not yet been reconfigured */
2393
2394                 for (j = 0, flash = &flash_table[0]; j < entry_count;
2395                         j++, flash++) {
2396
2397                         if ((val & FLASH_STRAP_MASK) == flash->strapping) {
2398                                 bp->flash_info = flash;
2399
2400                                 /* Request access to the flash interface. */
2401                                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2402                                         return rc;
2403
2404                                 /* Enable access to flash interface */
2405                                 bnx2_enable_nvram_access(bp);
2406
2407                                 /* Reconfigure the flash interface */
2408                                 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2409                                 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2410                                 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2411                                 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2412
2413                                 /* Disable access to flash interface */
2414                                 bnx2_disable_nvram_access(bp);
2415                                 bnx2_release_nvram_lock(bp);
2416
2417                                 break;
2418                         }
2419                 }
2420         } /* if (val & 0x40000000) */
2421
2422         if (j == entry_count) {
2423                 bp->flash_info = NULL;
2424                 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2425                 rc = -ENODEV;
2426         }
2427
2428         return rc;
2429 }
2430
2431 static int
2432 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2433                 int buf_size)
2434 {
2435         int rc = 0;
2436         u32 cmd_flags, offset32, len32, extra;
2437
2438         if (buf_size == 0)
2439                 return 0;
2440
2441         /* Request access to the flash interface. */
2442         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2443                 return rc;
2444
2445         /* Enable access to flash interface */
2446         bnx2_enable_nvram_access(bp);
2447
2448         len32 = buf_size;
2449         offset32 = offset;
2450         extra = 0;
2451
2452         cmd_flags = 0;
2453
2454         if (offset32 & 3) {
2455                 u8 buf[4];
2456                 u32 pre_len;
2457
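                /* The controller transfers whole dwords, so an unaligned
                 * start is rounded down and only the trailing bytes of
                 * the first dword are kept (e.g. a read at offset 6
                 * reads the dword at 4 and copies its last 2 bytes).
                 */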
2458                 offset32 &= ~3;
2459                 pre_len = 4 - (offset & 3);
2460
2461                 if (pre_len >= len32) {
2462                         pre_len = len32;
2463                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2464                                     BNX2_NVM_COMMAND_LAST;
2465                 }
2466                 else {
2467                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2468                 }
2469
2470                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2471
2472                 if (rc)
2473                         return rc;
2474
2475                 memcpy(ret_buf, buf + (offset & 3), pre_len);
2476
2477                 offset32 += 4;
2478                 ret_buf += pre_len;
2479                 len32 -= pre_len;
2480         }
2481         if (len32 & 3) {
2482                 extra = 4 - (len32 & 3);
2483                 len32 = (len32 + 4) & ~3;
2484         }
2485
2486         if (len32 == 4) {
2487                 u8 buf[4];
2488
2489                 if (cmd_flags)
2490                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2491                 else
2492                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2493                                     BNX2_NVM_COMMAND_LAST;
2494
2495                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2496
2497                 memcpy(ret_buf, buf, 4 - extra);
2498         }
2499         else if (len32 > 0) {
2500                 u8 buf[4];
2501
2502                 /* Read the first word. */
2503                 if (cmd_flags)
2504                         cmd_flags = 0;
2505                 else
2506                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2507
2508                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2509
2510                 /* Advance to the next dword. */
2511                 offset32 += 4;
2512                 ret_buf += 4;
2513                 len32 -= 4;
2514
2515                 while (len32 > 4 && rc == 0) {
2516                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2517
2518                         /* Advance to the next dword. */
2519                         offset32 += 4;
2520                         ret_buf += 4;
2521                         len32 -= 4;
2522                 }
2523
2524                 if (rc)
2525                         return rc;
2526
2527                 cmd_flags = BNX2_NVM_COMMAND_LAST;
2528                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2529
2530                 memcpy(ret_buf, buf, 4 - extra);
2531         }
2532
2533         /* Disable access to flash interface */
2534         bnx2_disable_nvram_access(bp);
2535
2536         bnx2_release_nvram_lock(bp);
2537
2538         return rc;
2539 }
2540
2541 static int
2542 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2543                 int buf_size)
2544 {
2545         u32 written, offset32, len32;
2546         u8 *buf, start[4], end[4];
2547         int rc = 0;
2548         int align_start, align_end;
2549
2550         buf = data_buf;
2551         offset32 = offset;
2552         len32 = buf_size;
2553         align_start = align_end = 0;
2554
2555         if ((align_start = (offset32 & 3))) {
2556                 offset32 &= ~3;
2557                 len32 += align_start;
2558                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2559                         return rc;
2560         }
2561
2562         if (len32 & 3) {
2563                 if ((len32 > 4) || !align_start) {
2564                         align_end = 4 - (len32 & 3);
2565                         len32 += align_end;
2566                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2567                                 end, 4))) {
2568                                 return rc;
2569                         }
2570                 }
2571         }
2572
2573         if (align_start || align_end) {
2574                 buf = kmalloc(len32, GFP_KERNEL);
2575                 if (buf == NULL)
2576                         return -ENOMEM;
2577                 if (align_start) {
2578                         memcpy(buf, start, 4);
2579                 }
2580                 if (align_end) {
2581                         memcpy(buf + len32 - 4, end, 4);
2582                 }
2583                 memcpy(buf + align_start, data_buf, buf_size);
2584         }
2585
2586         written = 0;
2587         while ((written < len32) && (rc == 0)) {
2588                 u32 page_start, page_end, data_start, data_end;
2589                 u32 addr, cmd_flags;
2590                 int i;
2591                 u8 flash_buffer[264];
2592
2593                 /* Find the page_start addr */
2594                 page_start = offset32 + written;
2595                 page_start -= (page_start % bp->flash_info->page_size);
2596                 /* Find the page_end addr */
2597                 page_end = page_start + bp->flash_info->page_size;
2598                 /* Find the data_start addr */
2599                 data_start = (written == 0) ? offset32 : page_start;
2600                 /* Find the data_end addr */
2601                 data_end = (page_end > offset32 + len32) ? 
2602                         (offset32 + len32) : page_end;
2603
2604                 /* Request access to the flash interface. */
2605                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2606                         goto nvram_write_end;
2607
2608                 /* Enable access to flash interface */
2609                 bnx2_enable_nvram_access(bp);
2610
2611                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2612                 if (bp->flash_info->buffered == 0) {
2613                         int j;
2614
2615                         /* Read the whole page into the buffer
2616                          * (non-buffered flash only) */
2617                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
2618                                 if (j == (bp->flash_info->page_size - 4)) {
2619                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
2620                                 }
2621                                 rc = bnx2_nvram_read_dword(bp,
2622                                         page_start + j, 
2623                                         &flash_buffer[j], 
2624                                         cmd_flags);
2625
2626                                 if (rc)
2627                                         goto nvram_write_end;
2628
2629                                 cmd_flags = 0;
2630                         }
2631                 }
2632
2633                 /* Enable writes to flash interface (unlock write-protect) */
2634                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2635                         goto nvram_write_end;
2636
2637                 /* Erase the page */
2638                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
2639                         goto nvram_write_end;
2640
2641                 /* Re-enable the write again for the actual write */
2642                 bnx2_enable_nvram_write(bp);
2643
2644                 /* Loop to write back the buffer data from page_start to
2645                  * data_start */
2646                 i = 0;
2647                 if (bp->flash_info->buffered == 0) {
2648                         for (addr = page_start; addr < data_start;
2649                                 addr += 4, i += 4) {
2650                                 
2651                                 rc = bnx2_nvram_write_dword(bp, addr,
2652                                         &flash_buffer[i], cmd_flags);
2653
2654                                 if (rc != 0)
2655                                         goto nvram_write_end;
2656
2657                                 cmd_flags = 0;
2658                         }
2659                 }
2660
2661                 /* Loop to write the new data from data_start to data_end */
2662                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
2663                         if ((addr == page_end - 4) ||
2664                                 ((bp->flash_info->buffered) &&
2665                                  (addr == data_end - 4))) {
2666
2667                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2668                         }
2669                         rc = bnx2_nvram_write_dword(bp, addr, buf,
2670                                 cmd_flags);
2671
2672                         if (rc != 0)
2673                                 goto nvram_write_end;
2674
2675                         cmd_flags = 0;
2676                         buf += 4;
2677                 }
2678
2679                 /* Loop to write back the buffer data from data_end
2680                  * to page_end */
2681                 if (bp->flash_info->buffered == 0) {
2682                         for (addr = data_end; addr < page_end;
2683                                 addr += 4, i += 4) {
2684                         
2685                                 if (addr == page_end-4) {
2686                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2687                                 }
2688                                 rc = bnx2_nvram_write_dword(bp, addr,
2689                                         &flash_buffer[i], cmd_flags);
2690
2691                                 if (rc != 0)
2692                                         goto nvram_write_end;
2693
2694                                 cmd_flags = 0;
2695                         }
2696                 }
2697
2698                 /* Disable writes to flash interface (lock write-protect) */
2699                 bnx2_disable_nvram_write(bp);
2700
2701                 /* Disable access to flash interface */
2702                 bnx2_disable_nvram_access(bp);
2703                 bnx2_release_nvram_lock(bp);
2704
2705                 /* Increment written */
2706                 written += data_end - data_start;
2707         }
2708
2709 nvram_write_end:
2710         if (align_start || align_end)
2711                 kfree(buf);
2712         return rc;
2713 }
2714
2715 static int
2716 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
2717 {
2718         u32 val;
2719         int i, rc = 0;
2720
2721         /* Wait for the current PCI transaction to complete before
2722          * issuing a reset. */
2723         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
2724                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2725                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2726                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2727                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2728         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
2729         udelay(5);
2730
2731         /* Deposit a driver reset signature so the firmware knows that
2732          * this is a soft reset. */
2733         REG_WR_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_DRV_RESET_SIGNATURE,
2734                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
2735
2736         bp->fw_timed_out = 0;
2737
2738         /* Wait for the firmware to tell us it is ok to issue a reset. */
2739         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code);
2740
2741         /* Do a dummy read to force the chip to complete all current transactions
2742          * before we issue a reset. */
2743         val = REG_RD(bp, BNX2_MISC_ID);
2744
2745         val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2746               BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
2747               BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
2748
2749         /* Chip reset. */
2750         REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
2751
2752         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2753             (CHIP_ID(bp) == CHIP_ID_5706_A1))
2754                 msleep(15);
2755
2756         /* Reset takes approximately 30 usec */
2757         for (i = 0; i < 10; i++) {
2758                 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
2759                 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2760                             BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
2761                         break;
2762                 }
2763                 udelay(10);
2764         }
2765
2766         if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2767                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
2768                 printk(KERN_ERR PFX "Chip reset did not complete\n");
2769                 return -EBUSY;
2770         }
2771
2772         /* Make sure byte swapping is properly configured. */
2773         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
2774         if (val != 0x01020304) {
2775                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
2776                 return -ENODEV;
2777         }
2778
2779         bp->fw_timed_out = 0;
2780
2781         /* Wait for the firmware to finish its initialization. */
2782         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code);
2783
2784         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2785                 /* Adjust the voltage regulator two steps lower.  The default
2786                  * of this register is 0x0000000e. */
2787                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
2788
2789                 /* Remove bad rbuf memory from the free pool. */
2790                 rc = bnx2_alloc_bad_rbuf(bp);
2791         }
2792
2793         return rc;
2794 }
2795
2796 static int
2797 bnx2_init_chip(struct bnx2 *bp)
2798 {
2799         u32 val;
2800
2801         /* Make sure the interrupt is not active. */
2802         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2803
2804         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
2805               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
2806 #ifdef __BIG_ENDIAN
2807               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | 
2808 #endif
2809               BNX2_DMA_CONFIG_CNTL_WORD_SWAP | 
2810               DMA_READ_CHANS << 12 |
2811               DMA_WRITE_CHANS << 16;
2812
2813         val |= (0x2 << 20) | (1 << 11);
2814
2815         if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
2816                 val |= (1 << 23);
2817
2818         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
2819             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
2820                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
2821
2822         REG_WR(bp, BNX2_DMA_CONFIG, val);
2823
2824         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2825                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
2826                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
2827                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
2828         }
2829
2830         if (bp->flags & PCIX_FLAG) {
2831                 u16 val16;
2832
2833                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
2834                                      &val16);
2835                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
2836                                       val16 & ~PCI_X_CMD_ERO);
2837         }
2838
2839         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2840                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
2841                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
2842                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
2843
2844         /* Initialize context mapping and zero out the quick contexts.  The
2845          * context block must have already been enabled. */
2846         bnx2_init_context(bp);
2847
2848         bnx2_init_cpus(bp);
2849         bnx2_init_nvram(bp);
2850
2851         bnx2_set_mac_addr(bp);
2852
2853         val = REG_RD(bp, BNX2_MQ_CONFIG);
2854         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
2855         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
2856         REG_WR(bp, BNX2_MQ_CONFIG, val);
2857
2858         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
2859         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
2860         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
2861
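        /* The RV2P page size is programmed as log2(page size) - 8 in the
         * upper byte; e.g. assuming 4 KB pages (BCM_PAGE_BITS == 12) the
         * field is 4.
         */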
2862         val = (BCM_PAGE_BITS - 8) << 24;
2863         REG_WR(bp, BNX2_RV2P_CONFIG, val);
2864
2865         /* Configure page size. */
2866         val = REG_RD(bp, BNX2_TBDR_CONFIG);
2867         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
2868         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
2869         REG_WR(bp, BNX2_TBDR_CONFIG, val);
2870
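        /* Seed the EMAC transmit backoff generator from the MAC address,
         * presumably so that different NICs pick different backoff slots.
         */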
2871         val = bp->mac_addr[0] +
2872               (bp->mac_addr[1] << 8) +
2873               (bp->mac_addr[2] << 16) +
2874               bp->mac_addr[3] +
2875               (bp->mac_addr[4] << 8) +
2876               (bp->mac_addr[5] << 16);
2877         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
2878
2879         /* Program the MTU.  Also include 4 bytes for CRC32. */
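        /* (For example, the default 1500-byte MTU programs
         * 1500 + 14 + 4 = 1518 here; anything above the standard maximum
         * frame size also sets the jumbo enable bit below.)
         */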
2880         val = bp->dev->mtu + ETH_HLEN + 4;
2881         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
2882                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
2883         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
2884
2885         bp->last_status_idx = 0;
2886         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
2887
2888         /* Set up how to generate a link change interrupt. */
2889         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2890
2891         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
2892                (u64) bp->status_blk_mapping & 0xffffffff);
2893         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
2894
2895         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
2896                (u64) bp->stats_blk_mapping & 0xffffffff);
2897         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
2898                (u64) bp->stats_blk_mapping >> 32);
2899
2900         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 
2901                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
2902
2903         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
2904                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
2905
2906         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
2907                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
2908
2909         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
2910
2911         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
2912
2913         REG_WR(bp, BNX2_HC_COM_TICKS,
2914                (bp->com_ticks_int << 16) | bp->com_ticks);
2915
2916         REG_WR(bp, BNX2_HC_CMD_TICKS,
2917                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
2918
2919         REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
2920         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
2921
2922         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
2923                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
2924         else {
2925                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
2926                        BNX2_HC_CONFIG_TX_TMR_MODE |
2927                        BNX2_HC_CONFIG_COLLECT_STATS);
2928         }
2929
2930         /* Clear internal stats counters. */
2931         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
2932
2933         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
2934
2935         /* Initialize the receive filter. */
2936         bnx2_set_rx_mode(bp->dev);
2937
2938         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET);
2939
2940         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
2941         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
2942
2943         udelay(20);
2944
2945         return 0;
2946 }
2947
2948
2949 static void
2950 bnx2_init_tx_ring(struct bnx2 *bp)
2951 {
2952         struct tx_bd *txbd;
2953         u32 val;
2954
2955         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
2956                 
2957         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
2958         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
2959
2960         bp->tx_prod = 0;
2961         bp->tx_cons = 0;
2962         bp->tx_prod_bseq = 0;
2963         atomic_set(&bp->tx_avail_bd, bp->tx_ring_size);
2964         
2965         val = BNX2_L2CTX_TYPE_TYPE_L2;
2966         val |= BNX2_L2CTX_TYPE_SIZE_L2;
2967         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
2968
2969         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
2970         val |= 8 << 16;
2971         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
2972
2973         val = (u64) bp->tx_desc_mapping >> 32;
2974         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
2975
2976         val = (u64) bp->tx_desc_mapping & 0xffffffff;
2977         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
2978 }
2979
2980 static void
2981 bnx2_init_rx_ring(struct bnx2 *bp)
2982 {
2983         struct rx_bd *rxbd;
2984         int i;
2985         u16 prod, ring_prod; 
2986         u32 val;
2987
2988         /* 8 for CRC and VLAN */
2989         bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
2990         /* 8 for alignment */
2991         bp->rx_buf_size = bp->rx_buf_use_size + 8;
2992
2993         ring_prod = prod = bp->rx_prod = 0;
2994         bp->rx_cons = 0;
2995         bp->rx_prod_bseq = 0;
2996                 
2997         rxbd = &bp->rx_desc_ring[0];
2998         for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
2999                 rxbd->rx_bd_len = bp->rx_buf_use_size;
3000                 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3001         }
3002
3003         rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32;
3004         rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff;
3005
3006         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3007         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3008         val |= 0x02 << 8;
3009         CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3010
3011         val = (u64) bp->rx_desc_mapping >> 32;
3012         CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3013
3014         val = (u64) bp->rx_desc_mapping & 0xffffffff;
3015         CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3016
3017         while (ring_prod < bp->rx_ring_size) {
3018                 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3019                         break;
3020                 }
3021                 prod = NEXT_RX_BD(prod);
3022                 ring_prod = RX_RING_IDX(prod);
3023         }
3024         bp->rx_prod = prod;
3025
3026         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3027
3028         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3029 }
3030
3031 static void
3032 bnx2_free_tx_skbs(struct bnx2 *bp)
3033 {
3034         int i;
3035
3036         if (bp->tx_buf_ring == NULL)
3037                 return;
3038
3039         for (i = 0; i < TX_DESC_CNT; ) {
3040                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3041                 struct sk_buff *skb = tx_buf->skb;
3042                 int j, last;
3043
3044                 if (skb == NULL) {
3045                         i++;
3046                         continue;
3047                 }
3048
3049                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3050                         skb_headlen(skb), PCI_DMA_TODEVICE);
3051
3052                 tx_buf->skb = NULL;
3053
3054                 last = skb_shinfo(skb)->nr_frags;
3055                 for (j = 0; j < last; j++) {
3056                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3057                         pci_unmap_page(bp->pdev,
3058                                 pci_unmap_addr(tx_buf, mapping),
3059                                 skb_shinfo(skb)->frags[j].size,
3060                                 PCI_DMA_TODEVICE);
3061                 }
3062                 dev_kfree_skb_any(skb);
3063                 i += j + 1;
3064         }
3065
3066 }
3067
3068 static void
3069 bnx2_free_rx_skbs(struct bnx2 *bp)
3070 {
3071         int i;
3072
3073         if (bp->rx_buf_ring == NULL)
3074                 return;
3075
3076         for (i = 0; i < RX_DESC_CNT; i++) {
3077                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3078                 struct sk_buff *skb = rx_buf->skb;
3079
3080                 if (skb == NULL)
3081                         continue;
3082
3083                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3084                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3085
3086                 rx_buf->skb = NULL;
3087
3088                 dev_kfree_skb_any(skb);
3089         }
3090 }
3091
3092 static void
3093 bnx2_free_skbs(struct bnx2 *bp)
3094 {
3095         bnx2_free_tx_skbs(bp);
3096         bnx2_free_rx_skbs(bp);
3097 }
3098
3099 static int
3100 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3101 {
3102         int rc;
3103
3104         rc = bnx2_reset_chip(bp, reset_code);
3105         bnx2_free_skbs(bp);
3106         if (rc)
3107                 return rc;
3108
3109         bnx2_init_chip(bp);
3110         bnx2_init_tx_ring(bp);
3111         bnx2_init_rx_ring(bp);
3112         return 0;
3113 }
3114
3115 static int
3116 bnx2_init_nic(struct bnx2 *bp)
3117 {
3118         int rc;
3119
3120         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3121                 return rc;
3122
3123         bnx2_init_phy(bp);
3124         bnx2_set_link(bp);
3125         return 0;
3126 }
3127
3128 static int
3129 bnx2_test_registers(struct bnx2 *bp)
3130 {
3131         int ret;
3132         int i;
3133         static struct {
3134                 u16   offset;
3135                 u16   flags;
3136                 u32   rw_mask;
3137                 u32   ro_mask;
3138         } reg_tbl[] = {
3139                 { 0x006c, 0, 0x00000000, 0x0000003f },
3140                 { 0x0090, 0, 0xffffffff, 0x00000000 },
3141                 { 0x0094, 0, 0x00000000, 0x00000000 },
3142
3143                 { 0x0404, 0, 0x00003f00, 0x00000000 },
3144                 { 0x0418, 0, 0x00000000, 0xffffffff },
3145                 { 0x041c, 0, 0x00000000, 0xffffffff },
3146                 { 0x0420, 0, 0x00000000, 0x80ffffff },
3147                 { 0x0424, 0, 0x00000000, 0x00000000 },
3148                 { 0x0428, 0, 0x00000000, 0x00000001 },
3149                 { 0x0450, 0, 0x00000000, 0x0000ffff },
3150                 { 0x0454, 0, 0x00000000, 0xffffffff },
3151                 { 0x0458, 0, 0x00000000, 0xffffffff },
3152
3153                 { 0x0808, 0, 0x00000000, 0xffffffff },
3154                 { 0x0854, 0, 0x00000000, 0xffffffff },
3155                 { 0x0868, 0, 0x00000000, 0x77777777 },
3156                 { 0x086c, 0, 0x00000000, 0x77777777 },
3157                 { 0x0870, 0, 0x00000000, 0x77777777 },
3158                 { 0x0874, 0, 0x00000000, 0x77777777 },
3159
3160                 { 0x0c00, 0, 0x00000000, 0x00000001 },
3161                 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3162                 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3163                 { 0x0c0c, 0, 0x00ffffff, 0x00000000 },
3164                 { 0x0c30, 0, 0x00000000, 0xffffffff },
3165                 { 0x0c34, 0, 0x00000000, 0xffffffff },
3166                 { 0x0c38, 0, 0x00000000, 0xffffffff },
3167                 { 0x0c3c, 0, 0x00000000, 0xffffffff },
3168                 { 0x0c40, 0, 0x00000000, 0xffffffff },
3169                 { 0x0c44, 0, 0x00000000, 0xffffffff },
3170                 { 0x0c48, 0, 0x00000000, 0x0007ffff },
3171                 { 0x0c4c, 0, 0x00000000, 0xffffffff },
3172                 { 0x0c50, 0, 0x00000000, 0xffffffff },
3173                 { 0x0c54, 0, 0x00000000, 0xffffffff },
3174                 { 0x0c58, 0, 0x00000000, 0xffffffff },
3175                 { 0x0c5c, 0, 0x00000000, 0xffffffff },
3176                 { 0x0c60, 0, 0x00000000, 0xffffffff },
3177                 { 0x0c64, 0, 0x00000000, 0xffffffff },
3178                 { 0x0c68, 0, 0x00000000, 0xffffffff },
3179                 { 0x0c6c, 0, 0x00000000, 0xffffffff },
3180                 { 0x0c70, 0, 0x00000000, 0xffffffff },
3181                 { 0x0c74, 0, 0x00000000, 0xffffffff },
3182                 { 0x0c78, 0, 0x00000000, 0xffffffff },
3183                 { 0x0c7c, 0, 0x00000000, 0xffffffff },
3184                 { 0x0c80, 0, 0x00000000, 0xffffffff },
3185                 { 0x0c84, 0, 0x00000000, 0xffffffff },
3186                 { 0x0c88, 0, 0x00000000, 0xffffffff },
3187                 { 0x0c8c, 0, 0x00000000, 0xffffffff },
3188                 { 0x0c90, 0, 0x00000000, 0xffffffff },
3189                 { 0x0c94, 0, 0x00000000, 0xffffffff },
3190                 { 0x0c98, 0, 0x00000000, 0xffffffff },
3191                 { 0x0c9c, 0, 0x00000000, 0xffffffff },
3192                 { 0x0ca0, 0, 0x00000000, 0xffffffff },
3193                 { 0x0ca4, 0, 0x00000000, 0xffffffff },
3194                 { 0x0ca8, 0, 0x00000000, 0x0007ffff },
3195                 { 0x0cac, 0, 0x00000000, 0xffffffff },
3196                 { 0x0cb0, 0, 0x00000000, 0xffffffff },
3197                 { 0x0cb4, 0, 0x00000000, 0xffffffff },
3198                 { 0x0cb8, 0, 0x00000000, 0xffffffff },
3199                 { 0x0cbc, 0, 0x00000000, 0xffffffff },
3200                 { 0x0cc0, 0, 0x00000000, 0xffffffff },
3201                 { 0x0cc4, 0, 0x00000000, 0xffffffff },
3202                 { 0x0cc8, 0, 0x00000000, 0xffffffff },
3203                 { 0x0ccc, 0, 0x00000000, 0xffffffff },
3204                 { 0x0cd0, 0, 0x00000000, 0xffffffff },
3205                 { 0x0cd4, 0, 0x00000000, 0xffffffff },
3206                 { 0x0cd8, 0, 0x00000000, 0xffffffff },
3207                 { 0x0cdc, 0, 0x00000000, 0xffffffff },
3208                 { 0x0ce0, 0, 0x00000000, 0xffffffff },
3209                 { 0x0ce4, 0, 0x00000000, 0xffffffff },
3210                 { 0x0ce8, 0, 0x00000000, 0xffffffff },
3211                 { 0x0cec, 0, 0x00000000, 0xffffffff },
3212                 { 0x0cf0, 0, 0x00000000, 0xffffffff },
3213                 { 0x0cf4, 0, 0x00000000, 0xffffffff },
3214                 { 0x0cf8, 0, 0x00000000, 0xffffffff },
3215                 { 0x0cfc, 0, 0x00000000, 0xffffffff },
3216                 { 0x0d00, 0, 0x00000000, 0xffffffff },
3217                 { 0x0d04, 0, 0x00000000, 0xffffffff },
3218
3219                 { 0x1000, 0, 0x00000000, 0x00000001 },
3220                 { 0x1004, 0, 0x00000000, 0x000f0001 },
3221                 { 0x1044, 0, 0x00000000, 0xffc003ff },
3222                 { 0x1080, 0, 0x00000000, 0x0001ffff },
3223                 { 0x1084, 0, 0x00000000, 0xffffffff },
3224                 { 0x1088, 0, 0x00000000, 0xffffffff },
3225                 { 0x108c, 0, 0x00000000, 0xffffffff },
3226                 { 0x1090, 0, 0x00000000, 0xffffffff },
3227                 { 0x1094, 0, 0x00000000, 0xffffffff },
3228                 { 0x1098, 0, 0x00000000, 0xffffffff },
3229                 { 0x109c, 0, 0x00000000, 0xffffffff },
3230                 { 0x10a0, 0, 0x00000000, 0xffffffff },
3231
3232                 { 0x1408, 0, 0x01c00800, 0x00000000 },
3233                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3234                 { 0x14a8, 0, 0x00000000, 0x000001ff },
3235                 { 0x14ac, 0, 0x4fffffff, 0x10000000 },
3236                 { 0x14b0, 0, 0x00000002, 0x00000001 },
3237                 { 0x14b8, 0, 0x00000000, 0x00000000 },
3238                 { 0x14c0, 0, 0x00000000, 0x00000009 },
3239                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3240                 { 0x14cc, 0, 0x00000000, 0x00000001 },
3241                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3242                 { 0x1500, 0, 0x00000000, 0xffffffff },
3243                 { 0x1504, 0, 0x00000000, 0xffffffff },
3244                 { 0x1508, 0, 0x00000000, 0xffffffff },
3245                 { 0x150c, 0, 0x00000000, 0xffffffff },
3246                 { 0x1510, 0, 0x00000000, 0xffffffff },
3247                 { 0x1514, 0, 0x00000000, 0xffffffff },
3248                 { 0x1518, 0, 0x00000000, 0xffffffff },
3249                 { 0x151c, 0, 0x00000000, 0xffffffff },
3250                 { 0x1520, 0, 0x00000000, 0xffffffff },
3251                 { 0x1524, 0, 0x00000000, 0xffffffff },
3252                 { 0x1528, 0, 0x00000000, 0xffffffff },
3253                 { 0x152c, 0, 0x00000000, 0xffffffff },
3254                 { 0x1530, 0, 0x00000000, 0xffffffff },
3255                 { 0x1534, 0, 0x00000000, 0xffffffff },
3256                 { 0x1538, 0, 0x00000000, 0xffffffff },
3257                 { 0x153c, 0, 0x00000000, 0xffffffff },
3258                 { 0x1540, 0, 0x00000000, 0xffffffff },
3259                 { 0x1544, 0, 0x00000000, 0xffffffff },
3260                 { 0x1548, 0, 0x00000000, 0xffffffff },
3261                 { 0x154c, 0, 0x00000000, 0xffffffff },
3262                 { 0x1550, 0, 0x00000000, 0xffffffff },
3263                 { 0x1554, 0, 0x00000000, 0xffffffff },
3264                 { 0x1558, 0, 0x00000000, 0xffffffff },
3265                 { 0x1600, 0, 0x00000000, 0xffffffff },
3266                 { 0x1604, 0, 0x00000000, 0xffffffff },
3267                 { 0x1608, 0, 0x00000000, 0xffffffff },
3268                 { 0x160c, 0, 0x00000000, 0xffffffff },
3269                 { 0x1610, 0, 0x00000000, 0xffffffff },
3270                 { 0x1614, 0, 0x00000000, 0xffffffff },
3271                 { 0x1618, 0, 0x00000000, 0xffffffff },
3272                 { 0x161c, 0, 0x00000000, 0xffffffff },
3273                 { 0x1620, 0, 0x00000000, 0xffffffff },
3274                 { 0x1624, 0, 0x00000000, 0xffffffff },
3275                 { 0x1628, 0, 0x00000000, 0xffffffff },
3276                 { 0x162c, 0, 0x00000000, 0xffffffff },
3277                 { 0x1630, 0, 0x00000000, 0xffffffff },
3278                 { 0x1634, 0, 0x00000000, 0xffffffff },
3279                 { 0x1638, 0, 0x00000000, 0xffffffff },
3280                 { 0x163c, 0, 0x00000000, 0xffffffff },
3281                 { 0x1640, 0, 0x00000000, 0xffffffff },
3282                 { 0x1644, 0, 0x00000000, 0xffffffff },
3283                 { 0x1648, 0, 0x00000000, 0xffffffff },
3284                 { 0x164c, 0, 0x00000000, 0xffffffff },
3285                 { 0x1650, 0, 0x00000000, 0xffffffff },
3286                 { 0x1654, 0, 0x00000000, 0xffffffff },
3287
3288                 { 0x1800, 0, 0x00000000, 0x00000001 },
3289                 { 0x1804, 0, 0x00000000, 0x00000003 },
3290                 { 0x1840, 0, 0x00000000, 0xffffffff },
3291                 { 0x1844, 0, 0x00000000, 0xffffffff },
3292                 { 0x1848, 0, 0x00000000, 0xffffffff },
3293                 { 0x184c, 0, 0x00000000, 0xffffffff },
3294                 { 0x1850, 0, 0x00000000, 0xffffffff },
3295                 { 0x1900, 0, 0x7ffbffff, 0x00000000 },
3296                 { 0x1904, 0, 0xffffffff, 0x00000000 },
3297                 { 0x190c, 0, 0xffffffff, 0x00000000 },
3298                 { 0x1914, 0, 0xffffffff, 0x00000000 },
3299                 { 0x191c, 0, 0xffffffff, 0x00000000 },
3300                 { 0x1924, 0, 0xffffffff, 0x00000000 },
3301                 { 0x192c, 0, 0xffffffff, 0x00000000 },
3302                 { 0x1934, 0, 0xffffffff, 0x00000000 },
3303                 { 0x193c, 0, 0xffffffff, 0x00000000 },
3304                 { 0x1944, 0, 0xffffffff, 0x00000000 },
3305                 { 0x194c, 0, 0xffffffff, 0x00000000 },
3306                 { 0x1954, 0, 0xffffffff, 0x00000000 },
3307                 { 0x195c, 0, 0xffffffff, 0x00000000 },
3308                 { 0x1964, 0, 0xffffffff, 0x00000000 },
3309                 { 0x196c, 0, 0xffffffff, 0x00000000 },
3310                 { 0x1974, 0, 0xffffffff, 0x00000000 },
3311                 { 0x197c, 0, 0xffffffff, 0x00000000 },
3312                 { 0x1980, 0, 0x0700ffff, 0x00000000 },
3313
3314                 { 0x1c00, 0, 0x00000000, 0x00000001 },
3315                 { 0x1c04, 0, 0x00000000, 0x00000003 },
3316                 { 0x1c08, 0, 0x0000000f, 0x00000000 },
3317                 { 0x1c40, 0, 0x00000000, 0xffffffff },
3318                 { 0x1c44, 0, 0x00000000, 0xffffffff },
3319                 { 0x1c48, 0, 0x00000000, 0xffffffff },
3320                 { 0x1c4c, 0, 0x00000000, 0xffffffff },
3321                 { 0x1c50, 0, 0x00000000, 0xffffffff },
3322                 { 0x1d00, 0, 0x7ffbffff, 0x00000000 },
3323                 { 0x1d04, 0, 0xffffffff, 0x00000000 },
3324                 { 0x1d0c, 0, 0xffffffff, 0x00000000 },
3325                 { 0x1d14, 0, 0xffffffff, 0x00000000 },
3326                 { 0x1d1c, 0, 0xffffffff, 0x00000000 },
3327                 { 0x1d24, 0, 0xffffffff, 0x00000000 },
3328                 { 0x1d2c, 0, 0xffffffff, 0x00000000 },
3329                 { 0x1d34, 0, 0xffffffff, 0x00000000 },
3330                 { 0x1d3c, 0, 0xffffffff, 0x00000000 },
3331                 { 0x1d44, 0, 0xffffffff, 0x00000000 },
3332                 { 0x1d4c, 0, 0xffffffff, 0x00000000 },
3333                 { 0x1d54, 0, 0xffffffff, 0x00000000 },
3334                 { 0x1d5c, 0, 0xffffffff, 0x00000000 },
3335                 { 0x1d64, 0, 0xffffffff, 0x00000000 },
3336                 { 0x1d6c, 0, 0xffffffff, 0x00000000 },
3337                 { 0x1d74, 0, 0xffffffff, 0x00000000 },
3338                 { 0x1d7c, 0, 0xffffffff, 0x00000000 },
3339                 { 0x1d80, 0, 0x0700ffff, 0x00000000 },
3340
3341                 { 0x2004, 0, 0x00000000, 0x0337000f },
3342                 { 0x2008, 0, 0xffffffff, 0x00000000 },
3343                 { 0x200c, 0, 0xffffffff, 0x00000000 },
3344                 { 0x2010, 0, 0xffffffff, 0x00000000 },
3345                 { 0x2014, 0, 0x801fff80, 0x00000000 },
3346                 { 0x2018, 0, 0x000003ff, 0x00000000 },
3347
3348                 { 0x2800, 0, 0x00000000, 0x00000001 },
3349                 { 0x2804, 0, 0x00000000, 0x00003f01 },
3350                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3351                 { 0x2810, 0, 0xffff0000, 0x00000000 },
3352                 { 0x2814, 0, 0xffff0000, 0x00000000 },
3353                 { 0x2818, 0, 0xffff0000, 0x00000000 },
3354                 { 0x281c, 0, 0xffff0000, 0x00000000 },
3355                 { 0x2834, 0, 0xffffffff, 0x00000000 },
3356                 { 0x2840, 0, 0x00000000, 0xffffffff },
3357                 { 0x2844, 0, 0x00000000, 0xffffffff },
3358                 { 0x2848, 0, 0xffffffff, 0x00000000 },
3359                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3360
3361                 { 0x2c00, 0, 0x00000000, 0x00000011 },
3362                 { 0x2c04, 0, 0x00000000, 0x00030007 },
3363
3364                 { 0x3000, 0, 0x00000000, 0x00000001 },
3365                 { 0x3004, 0, 0x00000000, 0x007007ff },
3366                 { 0x3008, 0, 0x00000003, 0x00000000 },
3367                 { 0x300c, 0, 0xffffffff, 0x00000000 },
3368                 { 0x3010, 0, 0xffffffff, 0x00000000 },
3369                 { 0x3014, 0, 0xffffffff, 0x00000000 },
3370                 { 0x3034, 0, 0xffffffff, 0x00000000 },
3371                 { 0x3038, 0, 0xffffffff, 0x00000000 },
3372                 { 0x3050, 0, 0x00000001, 0x00000000 },
3373
3374                 { 0x3c00, 0, 0x00000000, 0x00000001 },
3375                 { 0x3c04, 0, 0x00000000, 0x00070000 },
3376                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3377                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3378                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3379                 { 0x3c14, 0, 0x00000000, 0xffffffff },
3380                 { 0x3c18, 0, 0x00000000, 0xffffffff },
3381                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3382                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3383                 { 0x3c24, 0, 0xffffffff, 0x00000000 },
3384                 { 0x3c28, 0, 0xffffffff, 0x00000000 },
3385                 { 0x3c2c, 0, 0xffffffff, 0x00000000 },
3386                 { 0x3c30, 0, 0xffffffff, 0x00000000 },
3387                 { 0x3c34, 0, 0xffffffff, 0x00000000 },
3388                 { 0x3c38, 0, 0xffffffff, 0x00000000 },
3389                 { 0x3c3c, 0, 0xffffffff, 0x00000000 },
3390                 { 0x3c40, 0, 0xffffffff, 0x00000000 },
3391                 { 0x3c44, 0, 0xffffffff, 0x00000000 },
3392                 { 0x3c48, 0, 0xffffffff, 0x00000000 },
3393                 { 0x3c4c, 0, 0xffffffff, 0x00000000 },
3394                 { 0x3c50, 0, 0xffffffff, 0x00000000 },
3395                 { 0x3c54, 0, 0xffffffff, 0x00000000 },
3396                 { 0x3c58, 0, 0xffffffff, 0x00000000 },
3397                 { 0x3c5c, 0, 0xffffffff, 0x00000000 },
3398                 { 0x3c60, 0, 0xffffffff, 0x00000000 },
3399                 { 0x3c64, 0, 0xffffffff, 0x00000000 },
3400                 { 0x3c68, 0, 0xffffffff, 0x00000000 },
3401                 { 0x3c6c, 0, 0xffffffff, 0x00000000 },
3402                 { 0x3c70, 0, 0xffffffff, 0x00000000 },
3403                 { 0x3c74, 0, 0x0000003f, 0x00000000 },
3404                 { 0x3c78, 0, 0x00000000, 0x00000000 },
3405                 { 0x3c7c, 0, 0x00000000, 0x00000000 },
3406                 { 0x3c80, 0, 0x3fffffff, 0x00000000 },
3407                 { 0x3c84, 0, 0x0000003f, 0x00000000 },
3408                 { 0x3c88, 0, 0x00000000, 0xffffffff },
3409                 { 0x3c8c, 0, 0x00000000, 0xffffffff },
3410
3411                 { 0x4000, 0, 0x00000000, 0x00000001 },
3412                 { 0x4004, 0, 0x00000000, 0x00030000 },
3413                 { 0x4008, 0, 0x00000ff0, 0x00000000 },
3414                 { 0x400c, 0, 0xffffffff, 0x00000000 },
3415                 { 0x4088, 0, 0x00000000, 0x00070303 },
3416
3417                 { 0x4400, 0, 0x00000000, 0x00000001 },
3418                 { 0x4404, 0, 0x00000000, 0x00003f01 },
3419                 { 0x4408, 0, 0x7fff00ff, 0x00000000 },
3420                 { 0x440c, 0, 0xffffffff, 0x00000000 },
3421                 { 0x4410, 0, 0xffff,     0x0000 },
3422                 { 0x4414, 0, 0xffff,     0x0000 },
3423                 { 0x4418, 0, 0xffff,     0x0000 },
3424                 { 0x441c, 0, 0xffff,     0x0000 },
3425                 { 0x4428, 0, 0xffffffff, 0x00000000 },
3426                 { 0x442c, 0, 0xffffffff, 0x00000000 },
3427                 { 0x4430, 0, 0xffffffff, 0x00000000 },
3428                 { 0x4434, 0, 0xffffffff, 0x00000000 },
3429                 { 0x4438, 0, 0xffffffff, 0x00000000 },
3430                 { 0x443c, 0, 0xffffffff, 0x00000000 },
3431                 { 0x4440, 0, 0xffffffff, 0x00000000 },
3432                 { 0x4444, 0, 0xffffffff, 0x00000000 },
3433
3434                 { 0x4c00, 0, 0x00000000, 0x00000001 },
3435                 { 0x4c04, 0, 0x00000000, 0x0000003f },
3436                 { 0x4c08, 0, 0xffffffff, 0x00000000 },
3437                 { 0x4c0c, 0, 0x0007fc00, 0x00000000 },
3438                 { 0x4c10, 0, 0x80003fe0, 0x00000000 },
3439                 { 0x4c14, 0, 0xffffffff, 0x00000000 },
3440                 { 0x4c44, 0, 0x00000000, 0x9fff9fff },
3441                 { 0x4c48, 0, 0x00000000, 0xb3009fff },
3442                 { 0x4c4c, 0, 0x00000000, 0x77f33b30 },
3443                 { 0x4c50, 0, 0x00000000, 0xffffffff },
3444
3445                 { 0x5004, 0, 0x00000000, 0x0000007f },
3446                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3447                 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3448
3449                 { 0x5400, 0, 0x00000008, 0x00000001 },
3450                 { 0x5404, 0, 0x00000000, 0x0000003f },
3451                 { 0x5408, 0, 0x0000001f, 0x00000000 },
3452                 { 0x540c, 0, 0xffffffff, 0x00000000 },
3453                 { 0x5410, 0, 0xffffffff, 0x00000000 },
3454                 { 0x5414, 0, 0x0000ffff, 0x00000000 },
3455                 { 0x5418, 0, 0x0000ffff, 0x00000000 },
3456                 { 0x541c, 0, 0x0000ffff, 0x00000000 },
3457                 { 0x5420, 0, 0x0000ffff, 0x00000000 },
3458                 { 0x5428, 0, 0x000000ff, 0x00000000 },
3459                 { 0x542c, 0, 0xff00ffff, 0x00000000 },
3460                 { 0x5430, 0, 0x001fff80, 0x00000000 },
3461                 { 0x5438, 0, 0xffffffff, 0x00000000 },
3462                 { 0x543c, 0, 0xffffffff, 0x00000000 },
3463                 { 0x5440, 0, 0xf800f800, 0x07ff07ff },
3464
3465                 { 0x5c00, 0, 0x00000000, 0x00000001 },
3466                 { 0x5c04, 0, 0x00000000, 0x0003000f },
3467                 { 0x5c08, 0, 0x00000003, 0x00000000 },
3468                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3469                 { 0x5c10, 0, 0x00000000, 0xffffffff },
3470                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3471                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3472                 { 0x5c88, 0, 0x00000000, 0x00077373 },
3473                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3474
3475                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3476                 { 0x680c, 0, 0xffffffff, 0x00000000 },
3477                 { 0x6810, 0, 0xffffffff, 0x00000000 },
3478                 { 0x6814, 0, 0xffffffff, 0x00000000 },
3479                 { 0x6818, 0, 0xffffffff, 0x00000000 },
3480                 { 0x681c, 0, 0xffffffff, 0x00000000 },
3481                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3482                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3483                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3484                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3485                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3486                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3487                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3488                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3489                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3490                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3491                 { 0x684c, 0, 0xffffffff, 0x00000000 },
3492                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3493                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3494                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3495                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3496                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3497                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3498
3499                 { 0xffff, 0, 0x00000000, 0x00000000 },
3500         };
3501
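             /* For each register, write 0 and then all ones, checking that the
              * read/write bits respond while the read-only bits keep their
              * original value, then restore the saved contents.
              */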
3502         ret = 0;
3503         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3504                 u32 offset, rw_mask, ro_mask, save_val, val;
3505
3506                 offset = (u32) reg_tbl[i].offset;
3507                 rw_mask = reg_tbl[i].rw_mask;
3508                 ro_mask = reg_tbl[i].ro_mask;
3509
3510                 save_val = readl((u8 *) bp->regview + offset);
3511
3512                 writel(0, (u8 *) bp->regview + offset);
3513
3514                 val = readl((u8 *) bp->regview + offset);
3515                 if ((val & rw_mask) != 0) {
3516                         goto reg_test_err;
3517                 }
3518
3519                 if ((val & ro_mask) != (save_val & ro_mask)) {
3520                         goto reg_test_err;
3521                 }
3522
3523                 writel(0xffffffff, (u8 *) bp->regview + offset);
3524
3525                 val = readl((u8 *) bp->regview + offset);
3526                 if ((val & rw_mask) != rw_mask) {
3527                         goto reg_test_err;
3528                 }
3529
3530                 if ((val & ro_mask) != (save_val & ro_mask)) {
3531                         goto reg_test_err;
3532                 }
3533
3534                 writel(save_val, (u8 *) bp->regview + offset);
3535                 continue;
3536
3537 reg_test_err:
3538                 writel(save_val, (u8 *) bp->regview + offset);
3539                 ret = -ENODEV;
3540                 break;
3541         }
3542         return ret;
3543 }
3544
3545 static int
3546 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3547 {
3548         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3549                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3550         int i;
3551
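             /* Write each pattern to every 32-bit word in the region through
              * the indirect register interface and read it back to verify.
              */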
3552         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3553                 u32 offset;
3554
3555                 for (offset = 0; offset < size; offset += 4) {
3556
3557                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3558
3559                         if (REG_RD_IND(bp, start + offset) !=
3560                                 test_pattern[i]) {
3561                                 return -ENODEV;
3562                         }
3563                 }
3564         }
3565         return 0;
3566 }
3567
3568 static int
3569 bnx2_test_memory(struct bnx2 *bp)
3570 {
3571         int ret = 0;
3572         int i;
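             /* On-chip memory regions (offset, length) exercised by the test. */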
3573         static struct {
3574                 u32   offset;
3575                 u32   len;
3576         } mem_tbl[] = {
3577                 { 0x60000,  0x4000 },
3578                 { 0xa0000,  0x4000 },
3579                 { 0xe0000,  0x4000 },
3580                 { 0x120000, 0x4000 },
3581                 { 0x1a0000, 0x4000 },
3582                 { 0x160000, 0x4000 },
3583                 { 0xffffffff, 0    },
3584         };
3585
3586         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3587                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3588                         mem_tbl[i].len)) != 0) {
3589                         return ret;
3590                 }
3591         }
3592         
3593         return ret;
3594 }
3595
3596 static int
3597 bnx2_test_loopback(struct bnx2 *bp)
3598 {
3599         unsigned int pkt_size, num_pkts, i;
3600         struct sk_buff *skb, *rx_skb;
3601         unsigned char *packet;
3602         u16 rx_start_idx, rx_idx, send_idx;
3603         u32 send_bseq, val;
3604         dma_addr_t map;
3605         struct tx_bd *txbd;
3606         struct sw_bd *rx_buf;
3607         struct l2_fhdr *rx_hdr;
3608         int ret = -ENODEV;
3609
3610         if (!netif_running(bp->dev))
3611                 return -ENODEV;
3612
3613         bp->loopback = MAC_LOOPBACK;
3614         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_DIAG);
3615         bnx2_set_mac_loopback(bp);
3616
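             /* Build a maximum-size (1514 byte) test frame addressed to our
              * own MAC address and filled with a recognizable byte pattern.
              */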
3617         pkt_size = 1514;
3618         skb = dev_alloc_skb(pkt_size);
             if (skb == NULL)
                     goto loopback_test_done;
3619         packet = skb_put(skb, pkt_size);
3620         memcpy(packet, bp->mac_addr, 6);
3621         memset(packet + 6, 0x0, 8);
3622         for (i = 14; i < pkt_size; i++)
3623                 packet[i] = (unsigned char) (i & 0xff);
3624
3625         map = pci_map_single(bp->pdev, skb->data, pkt_size,
3626                 PCI_DMA_TODEVICE);
3627
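             /* Force a status block update (without raising an interrupt) so
              * the current RX consumer index can be sampled before sending.
              */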
3628         val = REG_RD(bp, BNX2_HC_COMMAND);
3629         REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3630         REG_RD(bp, BNX2_HC_COMMAND);
3631
3632         udelay(5);
3633         rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3634
3635         send_idx = 0;
3636         send_bseq = 0;
3637         num_pkts = 0;
3638
3639         txbd = &bp->tx_desc_ring[send_idx];
3640
3641         txbd->tx_bd_haddr_hi = (u64) map >> 32;
3642         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3643         txbd->tx_bd_mss_nbytes = pkt_size;
3644         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3645
3646         num_pkts++;
3647         send_idx = NEXT_TX_BD(send_idx);
3648
3649         send_bseq += pkt_size;
3650
3651         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, send_idx);
3652         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, send_bseq);
3653
3654
3655         udelay(100);
3656
3657         val = REG_RD(bp, BNX2_HC_COMMAND);
3658         REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3659         REG_RD(bp, BNX2_HC_COMMAND);
3660
3661         udelay(5);
3662
3663         pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3664         dev_kfree_skb_irq(skb);
3665
3666         if (bp->status_blk->status_tx_quick_consumer_index0 != send_idx) {
3667                 goto loopback_test_done;
3668         }
3669
3670         rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3671         if (rx_idx != rx_start_idx + num_pkts) {
3672                 goto loopback_test_done;
3673         }
3674
3675         rx_buf = &bp->rx_buf_ring[rx_start_idx];
3676         rx_skb = rx_buf->skb;
3677
3678         rx_hdr = (struct l2_fhdr *) rx_skb->data;
3679         skb_reserve(rx_skb, bp->rx_offset);
3680
3681         pci_dma_sync_single_for_cpu(bp->pdev,
3682                 pci_unmap_addr(rx_buf, mapping),
3683                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3684
3685         if (rx_hdr->l2_fhdr_errors &
3686                 (L2_FHDR_ERRORS_BAD_CRC |
3687                 L2_FHDR_ERRORS_PHY_DECODE |
3688                 L2_FHDR_ERRORS_ALIGNMENT |
3689                 L2_FHDR_ERRORS_TOO_SHORT |
3690                 L2_FHDR_ERRORS_GIANT_FRAME)) {
3691
3692                 goto loopback_test_done;
3693         }
3694
3695         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3696                 goto loopback_test_done;
3697         }
3698
3699         for (i = 14; i < pkt_size; i++) {
3700                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3701                         goto loopback_test_done;
3702                 }
3703         }
3704
3705         ret = 0;
3706
3707 loopback_test_done:
3708         bp->loopback = 0;
3709         return ret;
3710 }
3711
3712 #define NVRAM_SIZE 0x200
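/* Standard CRC-32 residue left when the checksum is run over a block that
 * includes its own appended CRC.
 */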
3713 #define CRC32_RESIDUAL 0xdebb20e3
3714
3715 static int
3716 bnx2_test_nvram(struct bnx2 *bp)
3717 {
3718         u32 buf[NVRAM_SIZE / 4];
3719         u8 *data = (u8 *) buf;
3720         int rc = 0;
3721         u32 magic, csum;
3722
3723         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3724                 goto test_nvram_done;
3725
3726         magic = be32_to_cpu(buf[0]);
3727         if (magic != 0x669955aa) {
3728                 rc = -ENODEV;
3729                 goto test_nvram_done;
3730         }
3731
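             /* Read 0x200 bytes starting at NVRAM offset 0x100 and verify the
              * CRC-32 residual of each 0x100-byte half.
              */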
3732         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3733                 goto test_nvram_done;
3734
3735         csum = ether_crc_le(0x100, data);
3736         if (csum != CRC32_RESIDUAL) {
3737                 rc = -ENODEV;
3738                 goto test_nvram_done;
3739         }
3740
3741         csum = ether_crc_le(0x100, data + 0x100);
3742         if (csum != CRC32_RESIDUAL) {
3743                 rc = -ENODEV;
3744         }
3745
3746 test_nvram_done:
3747         return rc;
3748 }
3749
3750 static int
3751 bnx2_test_link(struct bnx2 *bp)
3752 {
3753         u32 bmsr;
3754
3755         spin_lock_irq(&bp->phy_lock);
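             /* BMSR link status is latched low; read it twice so the second
              * read reflects the current link state.
              */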
3756         bnx2_read_phy(bp, MII_BMSR, &bmsr);
3757         bnx2_read_phy(bp, MII_BMSR, &bmsr);
3758         spin_unlock_irq(&bp->phy_lock);
3759                 
3760         if (bmsr & BMSR_LSTATUS) {
3761                 return 0;
3762         }
3763         return -ENODEV;
3764 }
3765
3766 static int
3767 bnx2_test_intr(struct bnx2 *bp)
3768 {
3769         int i;
3770         u32 val;
3771         u16 status_idx;
3772
3773         if (!netif_running(bp->dev))
3774                 return -ENODEV;
3775
3776         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3777
3778         /* This register is not touched during run-time. */
3779         val = REG_RD(bp, BNX2_HC_COMMAND);
3780         REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
3781         REG_RD(bp, BNX2_HC_COMMAND);
3782
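             /* Poll for up to ~100 ms for the status index to change, which
              * indicates that the interrupt handler actually ran.
              */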
3783         for (i = 0; i < 10; i++) {
3784                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3785                         status_idx) {
3786
3787                         break;
3788                 }
3789
3790                 msleep_interruptible(10);
3791         }
3792         if (i < 10)
3793                 return 0;
3794
3795         return -ENODEV;
3796 }
3797
3798 static void
3799 bnx2_timer(unsigned long data)
3800 {
3801         struct bnx2 *bp = (struct bnx2 *) data;
3802         u32 msg;
3803
3804         if (atomic_read(&bp->intr_sem) != 0)
3805                 goto bnx2_restart_timer;
3806
3807         msg = (u32) ++bp->fw_drv_pulse_wr_seq;
3808         REG_WR_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_DRV_PULSE_MB, msg);
3809
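             /* 5706 SerDes workaround: if autonegotiation has not completed,
              * fall back to parallel detection by forcing 1000 Mbps full
              * duplex while a signal is present but no configs are received,
              * and re-enable autonegotiation once configs are seen again.
              */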
3810         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
3811             (CHIP_NUM(bp) == CHIP_NUM_5706)) {
3812                 unsigned long flags;
3813
3814                 spin_lock_irqsave(&bp->phy_lock, flags);
3815                 if (bp->serdes_an_pending) {
3816                         bp->serdes_an_pending--;
3817                 }
3818                 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
3819                         u32 bmcr;
3820
3821                         bnx2_read_phy(bp, MII_BMCR, &bmcr);
3822
3823                         if (bmcr & BMCR_ANENABLE) {
3824                                 u32 phy1, phy2;
3825
3826                                 bnx2_write_phy(bp, 0x1c, 0x7c00);
3827                                 bnx2_read_phy(bp, 0x1c, &phy1);
3828
3829                                 bnx2_write_phy(bp, 0x17, 0x0f01);
3830                                 bnx2_read_phy(bp, 0x15, &phy2);
3831                                 bnx2_write_phy(bp, 0x17, 0x0f01);
3832                                 bnx2_read_phy(bp, 0x15, &phy2);
3833
3834                                 if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
3835                                         !(phy2 & 0x20)) {       /* no CONFIG */
3836
3837                                         bmcr &= ~BMCR_ANENABLE;
3838                                         bmcr |= BMCR_SPEED1000 |
3839                                                 BMCR_FULLDPLX;
3840                                         bnx2_write_phy(bp, MII_BMCR, bmcr);
3841                                         bp->phy_flags |=
3842                                                 PHY_PARALLEL_DETECT_FLAG;
3843                                 }
3844                         }
3845                 }
3846                 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
3847                         (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
3848                         u32 phy2;
3849
3850                         bnx2_write_phy(bp, 0x17, 0x0f01);
3851                         bnx2_read_phy(bp, 0x15, &phy2);
3852                         if (phy2 & 0x20) {
3853                                 u32 bmcr;
3854
3855                                 bnx2_read_phy(bp, MII_BMCR, &bmcr);
3856                                 bmcr |= BMCR_ANENABLE;
3857                                 bnx2_write_phy(bp, MII_BMCR, bmcr);
3858
3859                                 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
3860
3861                         }
3862                 }
3863
3864                 spin_unlock_irqrestore(&bp->phy_lock, flags);
3865         }
3866
3867 bnx2_restart_timer:
3868         bp->timer.expires = RUN_AT(bp->timer_interval);
3869
3870         add_timer(&bp->timer);
3871 }
3872
3873 /* Called with rtnl_lock */
3874 static int
3875 bnx2_open(struct net_device *dev)
3876 {
3877         struct bnx2 *bp = dev->priv;
3878         int rc;
3879
3880         bnx2_set_power_state(bp, 0);
3881         bnx2_disable_int(bp);
3882
3883         rc = bnx2_alloc_mem(bp);
3884         if (rc)
3885                 return rc;
3886
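             /* MSI is only attempted on chips newer than 5706 A0/A1 (early
              * revisions) and when not disabled via the module parameter;
              * otherwise fall back to a shared INTx interrupt.
              */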
3887         if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
3888                 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
3889                 !disable_msi) {
3890
3891                 if (pci_enable_msi(bp->pdev) == 0) {
3892                         bp->flags |= USING_MSI_FLAG;
3893                         rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
3894                                         dev);
3895                 }
3896                 else {
3897                         rc = request_irq(bp->pdev->irq, bnx2_interrupt,
3898                                         SA_SHIRQ, dev->name, dev);
3899                 }
3900         }
3901         else {
3902                 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
3903                                 dev->name, dev);
3904         }
3905         if (rc) {
3906                 bnx2_free_mem(bp);
3907                 return rc;
3908         }
3909
3910         rc = bnx2_init_nic(bp);
3911
3912         if (rc) {
3913                 free_irq(bp->pdev->irq, dev);
3914                 if (bp->flags & USING_MSI_FLAG) {
3915                         pci_disable_msi(bp->pdev);
3916                         bp->flags &= ~USING_MSI_FLAG;
3917                 }
3918                 bnx2_free_skbs(bp);
3919                 bnx2_free_mem(bp);
3920                 return rc;
3921         }
3922         
3923         init_timer(&bp->timer);
3924
3925         bp->timer.expires = RUN_AT(bp->timer_interval);
3926         bp->timer.data = (unsigned long) bp;
3927         bp->timer.function = bnx2_timer;
3928         add_timer(&bp->timer);
3929
3930         atomic_set(&bp->intr_sem, 0);
3931
3932         bnx2_enable_int(bp);
3933
3934         if (bp->flags & USING_MSI_FLAG) {
3935                 /* Test MSI to make sure it is working
3936                  * If MSI test fails, go back to INTx mode
3937                  */
3938                 if (bnx2_test_intr(bp) != 0) {
3939                         printk(KERN_WARNING PFX "%s: No interrupt was generated"
3940                                " using MSI, switching to INTx mode. Please"
3941                                " report this failure to the PCI maintainer"
3942                                " and include system chipset information.\n",
3943                                bp->dev->name);
3944
3945                         bnx2_disable_int(bp);
3946                         free_irq(bp->pdev->irq, dev);
3947                         pci_disable_msi(bp->pdev);
3948                         bp->flags &= ~USING_MSI_FLAG;
3949
3950                         rc = bnx2_init_nic(bp);
3951
3952                         if (!rc) {
3953                                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
3954                                         SA_SHIRQ, dev->name, dev);
3955                         }
3956                         if (rc) {
3957                                 bnx2_free_skbs(bp);
3958                                 bnx2_free_mem(bp);
3959                                 del_timer_sync(&bp->timer);
3960                                 return rc;
3961                         }
3962                         bnx2_enable_int(bp);
3963                 }
3964         }
3965         if (bp->flags & USING_MSI_FLAG) {
3966                 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
3967         }
3968
3969         netif_start_queue(dev);
3970
3971         return 0;
3972 }
3973
3974 static void
3975 bnx2_reset_task(void *data)
3976 {
3977         struct bnx2 *bp = data;
3978
3979         bnx2_netif_stop(bp);
3980
3981         bnx2_init_nic(bp);
3982
3983         atomic_set(&bp->intr_sem, 1);
3984         bnx2_netif_start(bp);
3985 }
3986
3987 static void
3988 bnx2_tx_timeout(struct net_device *dev)
3989 {
3990         struct bnx2 *bp = dev->priv;
3991
3992         /* This allows the netif to be shut down gracefully before resetting */
3993         schedule_work(&bp->reset_task);
3994 }
3995
3996 #ifdef BCM_VLAN
3997 /* Called with rtnl_lock */
3998 static void
3999 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4000 {
4001         struct bnx2 *bp = dev->priv;
4002
4003         bnx2_netif_stop(bp);
4004
4005         bp->vlgrp = vlgrp;
4006         bnx2_set_rx_mode(dev);
4007
4008         bnx2_netif_start(bp);
4009 }
4010
4011 /* Called with rtnl_lock */
4012 static void
4013 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4014 {
4015         struct bnx2 *bp = dev->priv;
4016
4017         bnx2_netif_stop(bp);
4018
4019         if (bp->vlgrp)
4020                 bp->vlgrp->vlan_devices[vid] = NULL;
4021         bnx2_set_rx_mode(dev);
4022
4023         bnx2_netif_start(bp);
4024 }
4025 #endif
4026
4027 /* Called with dev->xmit_lock.
4028  * hard_start_xmit is pseudo-lockless - a lock is only taken when
4029  * the tx queue is full. This way we get the benefit of lockless
4030  * operation most of the time without the complexity of handling
4031  * the netif_stop_queue/wake_queue race conditions.
4032  */
4033 static int
4034 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4035 {
4036         struct bnx2 *bp = dev->priv;
4037         dma_addr_t mapping;
4038         struct tx_bd *txbd;
4039         struct sw_bd *tx_buf;
4040         u32 len, vlan_tag_flags, last_frag, mss;
4041         u16 prod, ring_prod;
4042         int i;
4043
4044         if (unlikely(atomic_read(&bp->tx_avail_bd) <
4045                 (skb_shinfo(skb)->nr_frags + 1))) {
4046
4047                 netif_stop_queue(dev);
4048                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4049                         dev->name);
4050
4051                 return NETDEV_TX_BUSY;
4052         }
4053         len = skb_headlen(skb);
4054         prod = bp->tx_prod;
4055         ring_prod = TX_RING_IDX(prod);
4056
4057         vlan_tag_flags = 0;
4058         if (skb->ip_summed == CHECKSUM_HW) {
4059                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4060         }
4061
4062         if (bp->vlgrp && vlan_tx_tag_present(skb)) {
4063                 vlan_tag_flags |=
4064                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4065         }
4066 #ifdef BCM_TSO 
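             /* For TSO frames, clear the IP checksum, pre-compute the TCP
              * pseudo-header checksum and encode the extra IP/TCP header
              * length in the BD flags so the hardware can segment the packet.
              */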
4067         if ((mss = skb_shinfo(skb)->tso_size) &&
4068                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4069                 u32 tcp_opt_len, ip_tcp_len;
4070
4071                 if (skb_header_cloned(skb) &&
4072                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4073                         dev_kfree_skb(skb);
4074                         return NETDEV_TX_OK;
4075                 }
4076
4078                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4079
4080                 tcp_opt_len = 0;
4081                 if (skb->h.th->doff > 5) {
4082                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4083                 }
4084                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4085
4086                 skb->nh.iph->check = 0;
4087                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4088                 skb->h.th->check =
4089                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4090                                             skb->nh.iph->daddr,
4091                                             0, IPPROTO_TCP, 0);
4092
4093                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4094                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4095                                 (tcp_opt_len >> 2)) << 8;
4096                 }
4097         }
4098         else
4099 #endif
4100         {
4101                 mss = 0;
4102         }
4103
4104         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4105         
4106         tx_buf = &bp->tx_buf_ring[ring_prod];
4107         tx_buf->skb = skb;
4108         pci_unmap_addr_set(tx_buf, mapping, mapping);
4109
4110         txbd = &bp->tx_desc_ring[ring_prod];
4111
4112         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4113         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4114         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4115         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4116
4117         last_frag = skb_shinfo(skb)->nr_frags;
4118
4119         for (i = 0; i < last_frag; i++) {
4120                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4121
4122                 prod = NEXT_TX_BD(prod);
4123                 ring_prod = TX_RING_IDX(prod);
4124                 txbd = &bp->tx_desc_ring[ring_prod];
4125
4126                 len = frag->size;
4127                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4128                         len, PCI_DMA_TODEVICE);
4129                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4130                                 mapping, mapping);
4131
4132                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4133                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4134                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4135                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4136
4137         }
4138         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4139
4140         prod = NEXT_TX_BD(prod);
4141         bp->tx_prod_bseq += skb->len;
4142
4143         atomic_sub(last_frag + 1, &bp->tx_avail_bd);
4144
4145         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4146         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4147
4148         mmiowb();
4149
4150         bp->tx_prod = prod;
4151         dev->trans_start = jiffies;
4152
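             /* If the free BD count is getting low, stop the queue, but
              * re-check under tx_lock to close the race with the completion
              * path, which may have just freed descriptors.
              */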
4153         if (unlikely(atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS)) {
4154                 unsigned long flags;
4155
4156                 spin_lock_irqsave(&bp->tx_lock, flags);
4157                 if (atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS) {
4158                         netif_stop_queue(dev);
4159
4160                         if (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)
4161                                 netif_wake_queue(dev);
4162                 }
4163                 spin_unlock_irqrestore(&bp->tx_lock, flags);
4164         }
4165
4166         return NETDEV_TX_OK;
4167 }
4168
4169 /* Called with rtnl_lock */
4170 static int
4171 bnx2_close(struct net_device *dev)
4172 {
4173         struct bnx2 *bp = dev->priv;
4174         u32 reset_code;
4175
4176         flush_scheduled_work();
4177         bnx2_netif_stop(bp);
4178         del_timer_sync(&bp->timer);
4179         if (bp->wol)
4180                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4181         else
4182                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4183         bnx2_reset_chip(bp, reset_code);
4184         free_irq(bp->pdev->irq, dev);
4185         if (bp->flags & USING_MSI_FLAG) {
4186                 pci_disable_msi(bp->pdev);
4187                 bp->flags &= ~USING_MSI_FLAG;
4188         }
4189         bnx2_free_skbs(bp);
4190         bnx2_free_mem(bp);
4191         bp->link_up = 0;
4192         netif_carrier_off(bp->dev);
4193         bnx2_set_power_state(bp, 3);
4194         return 0;
4195 }
4196
4197 #define GET_NET_STATS64(ctr)                                    \
4198         (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
4199         (unsigned long) (ctr##_lo)
4200
4201 #define GET_NET_STATS32(ctr)            \
4202         (ctr##_lo)
4203
4204 #if (BITS_PER_LONG == 64)
4205 #define GET_NET_STATS   GET_NET_STATS64
4206 #else
4207 #define GET_NET_STATS   GET_NET_STATS32
4208 #endif
4209
4210 static struct net_device_stats *
4211 bnx2_get_stats(struct net_device *dev)
4212 {
4213         struct bnx2 *bp = dev->priv;
4214         struct statistics_block *stats_blk = bp->stats_blk;
4215         struct net_device_stats *net_stats = &bp->net_stats;
4216
4217         if (bp->stats_blk == NULL) {
4218                 return net_stats;
4219         }
4220         net_stats->rx_packets =
4221                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4222                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4223                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4224
4225         net_stats->tx_packets =
4226                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4227                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4228                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4229
4230         net_stats->rx_bytes =
4231                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4232
4233         net_stats->tx_bytes =
4234                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4235
4236         net_stats->multicast = 
4237                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4238
4239         net_stats->collisions = 
4240                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4241
4242         net_stats->rx_length_errors = 
4243                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4244                 stats_blk->stat_EtherStatsOverrsizePkts);
4245
4246         net_stats->rx_over_errors = 
4247                 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4248
4249         net_stats->rx_frame_errors = 
4250                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4251
4252         net_stats->rx_crc_errors = 
4253                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4254
4255         net_stats->rx_errors = net_stats->rx_length_errors +
4256                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4257                 net_stats->rx_crc_errors;
4258
4259         net_stats->tx_aborted_errors =
4260                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4261                 stats_blk->stat_Dot3StatsLateCollisions);
4262
4263         if (CHIP_NUM(bp) == CHIP_NUM_5706)
4264                 net_stats->tx_carrier_errors = 0;
4265         else {
4266                 net_stats->tx_carrier_errors =
4267                         (unsigned long)
4268                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
4269         }
4270
4271         net_stats->tx_errors =
4272                 (unsigned long) 
4273                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4274                 +
4275                 net_stats->tx_aborted_errors +
4276                 net_stats->tx_carrier_errors;
4277
4278         return net_stats;
4279 }
4280
4281 /* All ethtool functions called with rtnl_lock */
4282
4283 static int
4284 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4285 {
4286         struct bnx2 *bp = dev->priv;
4287
4288         cmd->supported = SUPPORTED_Autoneg;
4289         if (bp->phy_flags & PHY_SERDES_FLAG) {
4290                 cmd->supported |= SUPPORTED_1000baseT_Full |
4291                         SUPPORTED_FIBRE;
4292
4293                 cmd->port = PORT_FIBRE;
4294         }
4295         else {
4296                 cmd->supported |= SUPPORTED_10baseT_Half |
4297                         SUPPORTED_10baseT_Full |
4298                         SUPPORTED_100baseT_Half |
4299                         SUPPORTED_100baseT_Full |
4300                         SUPPORTED_1000baseT_Full |
4301                         SUPPORTED_TP;
4302
4303                 cmd->port = PORT_TP;
4304         }
4305
4306         cmd->advertising = bp->advertising;
4307
4308         if (bp->autoneg & AUTONEG_SPEED) {
4309                 cmd->autoneg = AUTONEG_ENABLE;
4310         }
4311         else {
4312                 cmd->autoneg = AUTONEG_DISABLE;
4313         }
4314
4315         if (netif_carrier_ok(dev)) {
4316                 cmd->speed = bp->line_speed;
4317                 cmd->duplex = bp->duplex;
4318         }
4319         else {
4320                 cmd->speed = -1;
4321                 cmd->duplex = -1;
4322         }
4323
4324         cmd->transceiver = XCVR_INTERNAL;
4325         cmd->phy_address = bp->phy_addr;
4326
4327         return 0;
4328 }
4329   
4330 static int
4331 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4332 {
4333         struct bnx2 *bp = dev->priv;
4334         u8 autoneg = bp->autoneg;
4335         u8 req_duplex = bp->req_duplex;
4336         u16 req_line_speed = bp->req_line_speed;
4337         u32 advertising = bp->advertising;
4338
4339         if (cmd->autoneg == AUTONEG_ENABLE) {
4340                 autoneg |= AUTONEG_SPEED;
4341
4342                 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; 
4343
4344                 /* allow advertising a single speed only */
4345                 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4346                         (cmd->advertising == ADVERTISED_10baseT_Full) ||
4347                         (cmd->advertising == ADVERTISED_100baseT_Half) ||
4348                         (cmd->advertising == ADVERTISED_100baseT_Full)) {
4349
4350                         if (bp->phy_flags & PHY_SERDES_FLAG)
4351                                 return -EINVAL;
4352
4353                         advertising = cmd->advertising;
4354
4355                 }
4356                 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4357                         advertising = cmd->advertising;
4358                 }
4359                 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4360                         return -EINVAL;
4361                 }
4362                 else {
4363                         if (bp->phy_flags & PHY_SERDES_FLAG) {
4364                                 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4365                         }
4366                         else {
4367                                 advertising = ETHTOOL_ALL_COPPER_SPEED;
4368                         }
4369                 }
4370                 advertising |= ADVERTISED_Autoneg;
4371         }
4372         else {
4373                 if (bp->phy_flags & PHY_SERDES_FLAG) {
4374                         if ((cmd->speed != SPEED_1000) ||
4375                                 (cmd->duplex != DUPLEX_FULL)) {
4376                                 return -EINVAL;
4377                         }
4378                 }
4379                 else if (cmd->speed == SPEED_1000) {
4380                         return -EINVAL;
4381                 }
4382                 autoneg &= ~AUTONEG_SPEED;
4383                 req_line_speed = cmd->speed;
4384                 req_duplex = cmd->duplex;
4385                 advertising = 0;
4386         }
4387
4388         bp->autoneg = autoneg;
4389         bp->advertising = advertising;
4390         bp->req_line_speed = req_line_speed;
4391         bp->req_duplex = req_duplex;
4392
4393         spin_lock_irq(&bp->phy_lock);
4394
4395         bnx2_setup_phy(bp);
4396
4397         spin_unlock_irq(&bp->phy_lock);
4398
4399         return 0;
4400 }
4401
4402 static void
4403 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4404 {
4405         struct bnx2 *bp = dev->priv;
4406
4407         strcpy(info->driver, DRV_MODULE_NAME);
4408         strcpy(info->version, DRV_MODULE_VERSION);
4409         strcpy(info->bus_info, pci_name(bp->pdev));
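             /* Format the packed 32-bit firmware version as four dotted
              * single-digit fields ("a.b.c.d").
              */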
4410         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4411         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4412         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4413         info->fw_version[6] = (bp->fw_ver & 0xff) + '0';
4414         info->fw_version[1] = info->fw_version[3] = info->fw_version[5] = '.';
4415         info->fw_version[7] = 0;
4416 }
4417
4418 static void
4419 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4420 {
4421         struct bnx2 *bp = dev->priv;
4422
4423         if (bp->flags & NO_WOL_FLAG) {
4424                 wol->supported = 0;
4425                 wol->wolopts = 0;
4426         }
4427         else {
4428                 wol->supported = WAKE_MAGIC;
4429                 if (bp->wol)
4430                         wol->wolopts = WAKE_MAGIC;
4431                 else
4432                         wol->wolopts = 0;
4433         }
4434         memset(&wol->sopass, 0, sizeof(wol->sopass));
4435 }
4436
4437 static int
4438 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4439 {
4440         struct bnx2 *bp = dev->priv;
4441
4442         if (wol->wolopts & ~WAKE_MAGIC)
4443                 return -EINVAL;
4444
4445         if (wol->wolopts & WAKE_MAGIC) {
4446                 if (bp->flags & NO_WOL_FLAG)
4447                         return -EINVAL;
4448
4449                 bp->wol = 1;
4450         }
4451         else {
4452                 bp->wol = 0;
4453         }
4454         return 0;
4455 }
4456
4457 static int
4458 bnx2_nway_reset(struct net_device *dev)
4459 {
4460         struct bnx2 *bp = dev->priv;
4461         u32 bmcr;
4462
4463         if (!(bp->autoneg & AUTONEG_SPEED)) {
4464                 return -EINVAL;
4465         }
4466
4467         spin_lock_irq(&bp->phy_lock);
4468
4469         /* Force a link down that is visible to the link partner */
4470         if (bp->phy_flags & PHY_SERDES_FLAG) {
4471                 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4472                 spin_unlock_irq(&bp->phy_lock);
4473
4474                 msleep(20);
4475
4476                 spin_lock_irq(&bp->phy_lock);
4477                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4478                         bp->serdes_an_pending = SERDES_AN_TIMEOUT /
4479                                 bp->timer_interval;
4480                 }
4481         }
4482
4483         bnx2_read_phy(bp, MII_BMCR, &bmcr);
4484         bmcr &= ~BMCR_LOOPBACK;
4485         bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4486
4487         spin_unlock_irq(&bp->phy_lock);
4488
4489         return 0;
4490 }
4491
4492 static int
4493 bnx2_get_eeprom_len(struct net_device *dev)
4494 {
4495         struct bnx2 *bp = dev->priv;
4496
4497         if (bp->flash_info == NULL)
4498                 return 0;
4499
4500         return (int) bp->flash_info->total_size;
4501 }
4502
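/* ethtool -e: read the requested range from NVRAM, clamping it to the size
 * of the flash part detected by bnx2_init_nvram().
 */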
4503 static int
4504 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4505                 u8 *eebuf)
4506 {
4507         struct bnx2 *bp = dev->priv;
4508         int rc;
4509
4510         if (eeprom->offset > bp->flash_info->total_size)
4511                 return -EINVAL;
4512
4513         if ((eeprom->offset + eeprom->len) > bp->flash_info->total_size)
4514                 eeprom->len = bp->flash_info->total_size - eeprom->offset;
4515
4516         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4517
4518         return rc;
4519 }
4520
4521 static int
4522 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4523                 u8 *eebuf)
4524 {
4525         struct bnx2 *bp = dev->priv;
4526         int rc;
4527
4528         if (eeprom->offset > bp->flash_info->total_size)
4529                 return -EINVAL;
4530
4531         if ((eeprom->offset + eeprom->len) > bp->flash_info->total_size)
4532                 eeprom->len = bp->flash_info->total_size - eeprom->offset;
4533
4534         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4535
4536         return rc;
4537 }
4538
4539 static int
4540 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4541 {
4542         struct bnx2 *bp = dev->priv;
4543
4544         memset(coal, 0, sizeof(struct ethtool_coalesce));
4545
4546         coal->rx_coalesce_usecs = bp->rx_ticks;
4547         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4548         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4549         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4550
4551         coal->tx_coalesce_usecs = bp->tx_ticks;
4552         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4553         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4554         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4555
4556         coal->stats_block_coalesce_usecs = bp->stats_ticks;
4557
4558         return 0;
4559 }
4560
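/* Apply new interrupt coalescing parameters.  Hardware limits: 0x3ff ticks
 * for the timer fields, 0xff frames for the quick consumer trip counts, and
 * a multiple of 256 usec (at most 0xffff00) for the statistics timer.  The
 * NIC is re-initialized if the interface is running.
 */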
4561 static int
4562 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4563 {
4564         struct bnx2 *bp = dev->priv;
4565
4566         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4567         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4568
4569         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; 
4570         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4571
4572         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4573         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4574
4575         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4576         if (bp->rx_quick_cons_trip_int > 0xff)
4577                 bp->rx_quick_cons_trip_int = 0xff;
4578
4579         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4580         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4581
4582         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4583         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4584
4585         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4586         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4587
4588         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4589         if (bp->tx_quick_cons_trip_int > 0xff)
4590                 bp->tx_quick_cons_trip_int = 0xff;
4591
4592         bp->stats_ticks = coal->stats_block_coalesce_usecs;
4593         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4594         bp->stats_ticks &= 0xffff00;
4595
4596         if (netif_running(bp->dev)) {
4597                 bnx2_netif_stop(bp);
4598                 bnx2_init_nic(bp);
4599                 bnx2_netif_start(bp);
4600         }
4601
4602         return 0;
4603 }
4604
4605 static void
4606 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4607 {
4608         struct bnx2 *bp = dev->priv;
4609
4610         ering->rx_max_pending = MAX_RX_DESC_CNT;
4611         ering->rx_mini_max_pending = 0;
4612         ering->rx_jumbo_max_pending = 0;
4613
4614         ering->rx_pending = bp->rx_ring_size;
4615         ering->rx_mini_pending = 0;
4616         ering->rx_jumbo_pending = 0;
4617
4618         ering->tx_max_pending = MAX_TX_DESC_CNT;
4619         ering->tx_pending = bp->tx_ring_size;
4620 }
4621
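/* Change the RX/TX ring sizes.  tx_pending must exceed MAX_SKB_FRAGS so a
 * maximally fragmented skb still fits, and both rings are capped at
 * MAX_RX_DESC_CNT/MAX_TX_DESC_CNT.  Requires a NIC re-init when running.
 */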
4622 static int
4623 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4624 {
4625         struct bnx2 *bp = dev->priv;
4626
4627         if ((ering->rx_pending > MAX_RX_DESC_CNT) ||
4628                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4629                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4630
4631                 return -EINVAL;
4632         }
4633         bp->rx_ring_size = ering->rx_pending;
4634         bp->tx_ring_size = ering->tx_pending;
4635
4636         if (netif_running(bp->dev)) {
4637                 bnx2_netif_stop(bp);
4638                 bnx2_init_nic(bp);
4639                 bnx2_netif_start(bp);
4640         }
4641
4642         return 0;
4643 }
4644
4645 static void
4646 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4647 {
4648         struct bnx2 *bp = dev->priv;
4649
4650         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4651         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4652         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4653 }
4654
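/* Set flow control: record the requested RX/TX pause settings and whether
 * pause autonegotiation is wanted, then reprogram the PHY.
 */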
4655 static int
4656 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4657 {
4658         struct bnx2 *bp = dev->priv;
4659
4660         bp->req_flow_ctrl = 0;
4661         if (epause->rx_pause)
4662                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4663         if (epause->tx_pause)
4664                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4665
4666         if (epause->autoneg) {
4667                 bp->autoneg |= AUTONEG_FLOW_CTRL;
4668         }
4669         else {
4670                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4671         }
4672
4673         spin_lock_irq(&bp->phy_lock);
4674
4675         bnx2_setup_phy(bp);
4676
4677         spin_unlock_irq(&bp->phy_lock);
4678
4679         return 0;
4680 }
4681
4682 static u32
4683 bnx2_get_rx_csum(struct net_device *dev)
4684 {
4685         struct bnx2 *bp = dev->priv;
4686
4687         return bp->rx_csum;
4688 }
4689
4690 static int
4691 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4692 {
4693         struct bnx2 *bp = dev->priv;
4694
4695         bp->rx_csum = data;
4696         return 0;
4697 }
4698
4699 #define BNX2_NUM_STATS 45
4700
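/* Names of the ethtool statistics, in the same order as
 * bnx2_stats_offset_arr and bnx2_5706_stats_len_arr below.
 */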
4701 static struct {
4702         char string[ETH_GSTRING_LEN];
4703 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4704         { "rx_bytes" },
4705         { "rx_error_bytes" },
4706         { "tx_bytes" },
4707         { "tx_error_bytes" },
4708         { "rx_ucast_packets" },
4709         { "rx_mcast_packets" },
4710         { "rx_bcast_packets" },
4711         { "tx_ucast_packets" },
4712         { "tx_mcast_packets" },
4713         { "tx_bcast_packets" },
4714         { "tx_mac_errors" },
4715         { "tx_carrier_errors" },
4716         { "rx_crc_errors" },
4717         { "rx_align_errors" },
4718         { "tx_single_collisions" },
4719         { "tx_multi_collisions" },
4720         { "tx_deferred" },
4721         { "tx_excess_collisions" },
4722         { "tx_late_collisions" },
4723         { "tx_total_collisions" },
4724         { "rx_fragments" },
4725         { "rx_jabbers" },
4726         { "rx_undersize_packets" },
4727         { "rx_oversize_packets" },
4728         { "rx_64_byte_packets" },
4729         { "rx_65_to_127_byte_packets" },
4730         { "rx_128_to_255_byte_packets" },
4731         { "rx_256_to_511_byte_packets" },
4732         { "rx_512_to_1023_byte_packets" },
4733         { "rx_1024_to_1522_byte_packets" },
4734         { "rx_1523_to_9022_byte_packets" },
4735         { "tx_64_byte_packets" },
4736         { "tx_65_to_127_byte_packets" },
4737         { "tx_128_to_255_byte_packets" },
4738         { "tx_256_to_511_byte_packets" },
4739         { "tx_512_to_1023_byte_packets" },
4740         { "tx_1024_to_1522_byte_packets" },
4741         { "tx_1523_to_9022_byte_packets" },
4742         { "rx_xon_frames" },
4743         { "rx_xoff_frames" },
4744         { "tx_xon_frames" },
4745         { "tx_xoff_frames" },
4746         { "rx_mac_ctrl_frames" },
4747         { "rx_filtered_packets" },
4748         { "rx_discards" },
4749 };
4750
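/* Offsets (in 32-bit words) into the hardware statistics block for each
 * counter named above.  For 64-bit counters this is the offset of the
 * high word.
 */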
4751 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
4752
4753 static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
4754     STATS_OFFSET32(stat_IfHCInOctets_hi),
4755     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
4756     STATS_OFFSET32(stat_IfHCOutOctets_hi),
4757     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
4758     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
4759     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
4760     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
4761     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
4762     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
4763     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
4764     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
4765     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),                 
4766     STATS_OFFSET32(stat_Dot3StatsFCSErrors),                          
4767     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),                    
4768     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),              
4769     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),            
4770     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),              
4771     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),                
4772     STATS_OFFSET32(stat_Dot3StatsLateCollisions),                     
4773     STATS_OFFSET32(stat_EtherStatsCollisions),                        
4774     STATS_OFFSET32(stat_EtherStatsFragments),                         
4775     STATS_OFFSET32(stat_EtherStatsJabbers),                           
4776     STATS_OFFSET32(stat_EtherStatsUndersizePkts),                     
4777     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),                     
4778     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),                    
4779     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),         
4780     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),        
4781     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),        
4782     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),       
4783     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),      
4784     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),      
4785     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),                    
4786     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),         
4787     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),        
4788     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),        
4789     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),       
4790     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),      
4791     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),      
4792     STATS_OFFSET32(stat_XonPauseFramesReceived),                      
4793     STATS_OFFSET32(stat_XoffPauseFramesReceived),                     
4794     STATS_OFFSET32(stat_OutXonSent),                                  
4795     STATS_OFFSET32(stat_OutXoffSent),                                 
4796     STATS_OFFSET32(stat_MacControlFramesReceived),                    
4797     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),                  
4798     STATS_OFFSET32(stat_IfInMBUFDiscards),                            
4799 };
4800
4801 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
4802  * skipped because of errata.
4803  */               
4804 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
4805         8,0,8,8,8,8,8,8,8,8,
4806         4,0,4,4,4,4,4,4,4,4,
4807         4,4,4,4,4,4,4,4,4,4,
4808         4,4,4,4,4,4,4,4,4,4,
4809         4,4,4,4,4,
4810 };
4811
4812 #define BNX2_NUM_TESTS 6
4813
4814 static struct {
4815         char string[ETH_GSTRING_LEN];
4816 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
4817         { "register_test (offline)" },
4818         { "memory_test (offline)" },
4819         { "loopback_test (offline)" },
4820         { "nvram_test (online)" },
4821         { "interrupt_test (online)" },
4822         { "link_test (online)" },
4823 };
4824
4825 static int
4826 bnx2_self_test_count(struct net_device *dev)
4827 {
4828         return BNX2_NUM_TESTS;
4829 }
4830
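/* Run the ethtool self tests.  The offline tests (registers, memory,
 * loopback) reset the chip and drop the link; the online tests (NVRAM,
 * interrupt, link) run against the live configuration.
 */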
4831 static void
4832 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
4833 {
4834         struct bnx2 *bp = dev->priv;
4835
4836         memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
4837         if (etest->flags & ETH_TEST_FL_OFFLINE) {
4838                 bnx2_netif_stop(bp);
4839                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
4840                 bnx2_free_skbs(bp);
4841
4842                 if (bnx2_test_registers(bp) != 0) {
4843                         buf[0] = 1;
4844                         etest->flags |= ETH_TEST_FL_FAILED;
4845                 }
4846                 if (bnx2_test_memory(bp) != 0) {
4847                         buf[1] = 1;
4848                         etest->flags |= ETH_TEST_FL_FAILED;
4849                 }
4850                 if (bnx2_test_loopback(bp) != 0) {
4851                         buf[2] = 1;
4852                         etest->flags |= ETH_TEST_FL_FAILED;
4853                 }
4854
4855                 if (!netif_running(bp->dev)) {
4856                         bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
4857                 }
4858                 else {
4859                         bnx2_init_nic(bp);
4860                         bnx2_netif_start(bp);
4861                 }
4862
4863                 /* wait for link up */
4864                 msleep_interruptible(3000);
4865                 if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
4866                         msleep_interruptible(4000);
4867         }
4868
4869         if (bnx2_test_nvram(bp) != 0) {
4870                 buf[3] = 1;
4871                 etest->flags |= ETH_TEST_FL_FAILED;
4872         }
4873         if (bnx2_test_intr(bp) != 0) {
4874                 buf[4] = 1;
4875                 etest->flags |= ETH_TEST_FL_FAILED;
4876         }
4877
4878         if (bnx2_test_link(bp) != 0) {
4879                 buf[5] = 1;
4880                 etest->flags |= ETH_TEST_FL_FAILED;
4881
4882         }
4883 }
4884
4885 static void
4886 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
4887 {
4888         switch (stringset) {
4889         case ETH_SS_STATS:
4890                 memcpy(buf, bnx2_stats_str_arr,
4891                         sizeof(bnx2_stats_str_arr));
4892                 break;
4893         case ETH_SS_TEST:
4894                 memcpy(buf, bnx2_tests_str_arr,
4895                         sizeof(bnx2_tests_str_arr));
4896                 break;
4897         }
4898 }
4899
4900 static int
4901 bnx2_get_stats_count(struct net_device *dev)
4902 {
4903         return BNX2_NUM_STATS;
4904 }
4905
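/* Copy the hardware statistics block into the u64 array ethtool expects.
 * Counter widths come from the per-chip length array; only the 5706 is
 * listed in bnx2_pci_tbl, so stats_len_arr is always set below.  Entries
 * with length 0 are counters skipped because of chip errata.
 */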
4906 static void
4907 bnx2_get_ethtool_stats(struct net_device *dev,
4908                 struct ethtool_stats *stats, u64 *buf)
4909 {
4910         struct bnx2 *bp = dev->priv;
4911         int i;
4912         u32 *hw_stats = (u32 *) bp->stats_blk;
4913         u8 *stats_len_arr = NULL;
4914
4915         if (hw_stats == NULL) {
4916                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
4917                 return;
4918         }
4919
4920         if (CHIP_NUM(bp) == CHIP_NUM_5706)
4921                 stats_len_arr = bnx2_5706_stats_len_arr;
4922
4923         for (i = 0; i < BNX2_NUM_STATS; i++) {
4924                 if (stats_len_arr[i] == 0) {
4925                         /* skip this counter */
4926                         buf[i] = 0;
4927                         continue;
4928                 }
4929                 if (stats_len_arr[i] == 4) {
4930                         /* 4-byte counter */
4931                         buf[i] = (u64)
4932                                 *(hw_stats + bnx2_stats_offset_arr[i]);
4933                         continue;
4934                 }
4935                 /* 8-byte counter */
4936                 buf[i] = (((u64) *(hw_stats +
4937                                         bnx2_stats_offset_arr[i])) << 32) +
4938                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
4939         }
4940 }
4941
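/* ethtool -p: blink the port LED for 'data' seconds (2 by default) by
 * toggling the LED override bits every 500 ms.
 */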
4942 static int
4943 bnx2_phys_id(struct net_device *dev, u32 data)
4944 {
4945         struct bnx2 *bp = dev->priv;
4946         int i;
4947         u32 save;
4948
4949         if (data == 0)
4950                 data = 2;
4951
4952         save = REG_RD(bp, BNX2_MISC_CFG);
4953         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
4954
4955         for (i = 0; i < (data * 2); i++) {
4956                 if ((i % 2) == 0) {
4957                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
4958                 }
4959                 else {
4960                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
4961                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
4962                                 BNX2_EMAC_LED_100MB_OVERRIDE |
4963                                 BNX2_EMAC_LED_10MB_OVERRIDE |
4964                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
4965                                 BNX2_EMAC_LED_TRAFFIC);
4966                 }
4967                 msleep_interruptible(500);
4968                 if (signal_pending(current))
4969                         break;
4970         }
4971         REG_WR(bp, BNX2_EMAC_LED, 0);
4972         REG_WR(bp, BNX2_MISC_CFG, save);
4973         return 0;
4974 }
4975
4976 static struct ethtool_ops bnx2_ethtool_ops = {
4977         .get_settings           = bnx2_get_settings,
4978         .set_settings           = bnx2_set_settings,
4979         .get_drvinfo            = bnx2_get_drvinfo,
4980         .get_wol                = bnx2_get_wol,
4981         .set_wol                = bnx2_set_wol,
4982         .nway_reset             = bnx2_nway_reset,
4983         .get_link               = ethtool_op_get_link,
4984         .get_eeprom_len         = bnx2_get_eeprom_len,
4985         .get_eeprom             = bnx2_get_eeprom,
4986         .set_eeprom             = bnx2_set_eeprom,
4987         .get_coalesce           = bnx2_get_coalesce,
4988         .set_coalesce           = bnx2_set_coalesce,
4989         .get_ringparam          = bnx2_get_ringparam,
4990         .set_ringparam          = bnx2_set_ringparam,
4991         .get_pauseparam         = bnx2_get_pauseparam,
4992         .set_pauseparam         = bnx2_set_pauseparam,
4993         .get_rx_csum            = bnx2_get_rx_csum,
4994         .set_rx_csum            = bnx2_set_rx_csum,
4995         .get_tx_csum            = ethtool_op_get_tx_csum,
4996         .set_tx_csum            = ethtool_op_set_tx_csum,
4997         .get_sg                 = ethtool_op_get_sg,
4998         .set_sg                 = ethtool_op_set_sg,
4999 #ifdef BCM_TSO
5000         .get_tso                = ethtool_op_get_tso,
5001         .set_tso                = ethtool_op_set_tso,
5002 #endif
5003         .self_test_count        = bnx2_self_test_count,
5004         .self_test              = bnx2_self_test,
5005         .get_strings            = bnx2_get_strings,
5006         .phys_id                = bnx2_phys_id,
5007         .get_stats_count        = bnx2_get_stats_count,
5008         .get_ethtool_stats      = bnx2_get_ethtool_stats,
5009 };
5010
5011 /* Called with rtnl_lock */
5012 static int
5013 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5014 {
5015         struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
5016         struct bnx2 *bp = dev->priv;
5017         int err;
5018
5019         switch(cmd) {
5020         case SIOCGMIIPHY:
5021                 data->phy_id = bp->phy_addr;
5022
5023                 /* fallthru */
5024         case SIOCGMIIREG: {
5025                 u32 mii_regval;
5026
5027                 spin_lock_irq(&bp->phy_lock);
5028                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5029                 spin_unlock_irq(&bp->phy_lock);
5030
5031                 data->val_out = mii_regval;
5032
5033                 return err;
5034         }
5035
5036         case SIOCSMIIREG:
5037                 if (!capable(CAP_NET_ADMIN))
5038                         return -EPERM;
5039
5040                 spin_lock_irq(&bp->phy_lock);
5041                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5042                 spin_unlock_irq(&bp->phy_lock);
5043
5044                 return err;
5045
5046         default:
5047                 /* do nothing */
5048                 break;
5049         }
5050         return -EOPNOTSUPP;
5051 }
5052
5053 /* Called with rtnl_lock */
5054 static int
5055 bnx2_change_mac_addr(struct net_device *dev, void *p)
5056 {
5057         struct sockaddr *addr = p;
5058         struct bnx2 *bp = dev->priv;
5059
5060         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5061         if (netif_running(dev))
5062                 bnx2_set_mac_addr(bp);
5063
5064         return 0;
5065 }
5066
5067 /* Called with rtnl_lock */
5068 static int
5069 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5070 {
5071         struct bnx2 *bp = dev->priv;
5072
5073         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5074                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5075                 return -EINVAL;
5076
5077         dev->mtu = new_mtu;
5078         if (netif_running(dev)) {
5079                 bnx2_netif_stop(bp);
5080
5081                 bnx2_init_nic(bp);
5082
5083                 bnx2_netif_start(bp);
5084         }
5085         return 0;
5086 }
5087
5088 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5089 static void
5090 poll_bnx2(struct net_device *dev)
5091 {
5092         struct bnx2 *bp = dev->priv;
5093
5094         disable_irq(bp->pdev->irq);
5095         bnx2_interrupt(bp->pdev->irq, dev, NULL);
5096         enable_irq(bp->pdev->irq);
5097 }
5098 #endif
5099
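/* One-time board setup at probe time: enable the PCI device, map the
 * register window, detect the bus type and speed, read the bootcode
 * version and permanent MAC address from shared memory, and fill in
 * default coalescing and link parameters.
 */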
5100 static int __devinit
5101 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5102 {
5103         struct bnx2 *bp;
5104         unsigned long mem_len;
5105         int rc;
5106         u32 reg;
5107
5108         SET_MODULE_OWNER(dev);
5109         SET_NETDEV_DEV(dev, &pdev->dev);
5110         bp = dev->priv;
5111
5112         bp->flags = 0;
5113         bp->phy_flags = 0;
5114
5115         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5116         rc = pci_enable_device(pdev);
5117         if (rc) {
5118                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
5119                 goto err_out;
5120         }
5121
5122         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5123                 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5124                        "aborting.\n");
5125                 rc = -ENODEV;
5126                 goto err_out_disable;
5127         }
5128
5129         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5130         if (rc) {
5131                 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5132                 goto err_out_disable;
5133         }
5134
5135         pci_set_master(pdev);
5136
5137         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5138         if (bp->pm_cap == 0) {
5139                 printk(KERN_ERR PFX "Cannot find power management capability, "
5140                                "aborting.\n");
5141                 rc = -EIO;
5142                 goto err_out_release;
5143         }
5144
5145         bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5146         if (bp->pcix_cap == 0) {
5147                 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5148                 rc = -EIO;
5149                 goto err_out_release;
5150         }
5151
5152         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5153                 bp->flags |= USING_DAC_FLAG;
5154                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5155                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5156                                "failed, aborting.\n");
5157                         rc = -EIO;
5158                         goto err_out_release;
5159                 }
5160         }
5161         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5162                 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5163                 rc = -EIO;
5164                 goto err_out_release;
5165         }
5166
5167         bp->dev = dev;
5168         bp->pdev = pdev;
5169
5170         spin_lock_init(&bp->phy_lock);
5171         spin_lock_init(&bp->tx_lock);
5172         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5173
5174         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5175         mem_len = MB_GET_CID_ADDR(17);
5176         dev->mem_end = dev->mem_start + mem_len;
5177         dev->irq = pdev->irq;
5178
5179         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5180
5181         if (!bp->regview) {
5182                 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5183                 rc = -ENOMEM;
5184                 goto err_out_release;
5185         }
5186
5187         /* Configure byte swap and enable write to the reg_window registers.
5188          * Rely on the CPU to do target byte swapping on big endian systems;
5189          * the chip's target access swapping will not swap all accesses.
5190          */
5191         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5192                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5193                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5194
5195         bnx2_set_power_state(bp, 0);
5196
5197         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5198
5199         bp->phy_addr = 1;
5200
5201         /* Get bus information. */
5202         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5203         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5204                 u32 clkreg;
5205
5206                 bp->flags |= PCIX_FLAG;
5207
5208                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5209                 
5210                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5211                 switch (clkreg) {
5212                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5213                         bp->bus_speed_mhz = 133;
5214                         break;
5215
5216                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5217                         bp->bus_speed_mhz = 100;
5218                         break;
5219
5220                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5221                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5222                         bp->bus_speed_mhz = 66;
5223                         break;
5224
5225                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5226                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5227                         bp->bus_speed_mhz = 50;
5228                         break;
5229
5230                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5231                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5232                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5233                         bp->bus_speed_mhz = 33;
5234                         break;
5235                 }
5236         }
5237         else {
5238                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5239                         bp->bus_speed_mhz = 66;
5240                 else
5241                         bp->bus_speed_mhz = 33;
5242         }
5243
5244         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5245                 bp->flags |= PCI_32BIT_FLAG;
5246
5247         /* 5706A0 may falsely detect SERR and PERR. */
5248         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5249                 reg = REG_RD(bp, PCI_COMMAND);
5250                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5251                 REG_WR(bp, PCI_COMMAND, reg);
5252         }
5253         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5254                 !(bp->flags & PCIX_FLAG)) {
5255
5256                 printk(KERN_ERR PFX "5706 A1 can only be used in a PCI-X bus, "
5257                        "aborting.\n");
                rc = -EPERM;
5258                 goto err_out_unmap;
5259         }
5260
5261         bnx2_init_nvram(bp);
5262
5263         /* Get the permanent MAC address.  First we need to make sure the
5264          * firmware is actually running.
5265          */
5266         reg = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_DEV_INFO_SIGNATURE);
5267
5268         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5269             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5270                 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5271                 rc = -ENODEV;
5272                 goto err_out_unmap;
5273         }
5274
5275         bp->fw_ver = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE +
5276                                 BNX2_DEV_INFO_BC_REV);
5277
5278         reg = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_PORT_HW_CFG_MAC_UPPER);
5279         bp->mac_addr[0] = (u8) (reg >> 8);
5280         bp->mac_addr[1] = (u8) reg;
5281
5282         reg = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_PORT_HW_CFG_MAC_LOWER);
5283         bp->mac_addr[2] = (u8) (reg >> 24);
5284         bp->mac_addr[3] = (u8) (reg >> 16);
5285         bp->mac_addr[4] = (u8) (reg >> 8);
5286         bp->mac_addr[5] = (u8) reg;
5287
5288         bp->tx_ring_size = MAX_TX_DESC_CNT;
5289         bp->rx_ring_size = 100;
5290
5291         bp->rx_csum = 1;
5292
5293         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5294
5295         bp->tx_quick_cons_trip_int = 20;
5296         bp->tx_quick_cons_trip = 20;
5297         bp->tx_ticks_int = 80;
5298         bp->tx_ticks = 80;
5299                 
5300         bp->rx_quick_cons_trip_int = 6;
5301         bp->rx_quick_cons_trip = 6;
5302         bp->rx_ticks_int = 18;
5303         bp->rx_ticks = 18;
5304
5305         bp->stats_ticks = 1000000 & 0xffff00;
5306
5307         bp->timer_interval =  HZ;
5308
5309         /* Disable WOL support if we are running on a SERDES chip. */
5310         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5311                 bp->phy_flags |= PHY_SERDES_FLAG;
5312                 bp->flags |= NO_WOL_FLAG;
5313         }
5314
5315         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5316                 bp->tx_quick_cons_trip_int =
5317                         bp->tx_quick_cons_trip;
5318                 bp->tx_ticks_int = bp->tx_ticks;
5319                 bp->rx_quick_cons_trip_int =
5320                         bp->rx_quick_cons_trip;
5321                 bp->rx_ticks_int = bp->rx_ticks;
5322                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5323                 bp->com_ticks_int = bp->com_ticks;
5324                 bp->cmd_ticks_int = bp->cmd_ticks;
5325         }
5326
5327         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5328         bp->req_line_speed = 0;
5329         if (bp->phy_flags & PHY_SERDES_FLAG) {
5330                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5331         }
5332         else {
5333                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5334         }
5335
5336         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5337
5338         return 0;
5339
5340 err_out_unmap:
5341         if (bp->regview) {
5342                 iounmap(bp->regview);
5343         }
5344
5345 err_out_release:
5346         pci_release_regions(pdev);
5347
5348 err_out_disable:
5349         pci_disable_device(pdev);
5350         pci_set_drvdata(pdev, NULL);
5351
5352 err_out:
5353         return rc;
5354 }
5355
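/* PCI probe entry point: allocate the net_device, run bnx2_init_board(),
 * hook up the netdev methods and feature flags, and register the device.
 */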
5356 static int __devinit
5357 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5358 {
5359         static int version_printed = 0;
5360         struct net_device *dev = NULL;
5361         struct bnx2 *bp;
5362         int rc, i;
5363
5364         if (version_printed++ == 0)
5365                 printk(KERN_INFO "%s", version);
5366
5367         /* dev zeroed in alloc_etherdev */
5368         dev = alloc_etherdev(sizeof(*bp));
5369
5370         if (!dev)
5371                 return -ENOMEM;
5372
5373         rc = bnx2_init_board(pdev, dev);
5374         if (rc < 0) {
5375                 free_netdev(dev);
5376                 return rc;
5377         }
5378
5379         dev->open = bnx2_open;
5380         dev->hard_start_xmit = bnx2_start_xmit;
5381         dev->stop = bnx2_close;
5382         dev->get_stats = bnx2_get_stats;
5383         dev->set_multicast_list = bnx2_set_rx_mode;
5384         dev->do_ioctl = bnx2_ioctl;
5385         dev->set_mac_address = bnx2_change_mac_addr;
5386         dev->change_mtu = bnx2_change_mtu;
5387         dev->tx_timeout = bnx2_tx_timeout;
5388         dev->watchdog_timeo = TX_TIMEOUT;
5389 #ifdef BCM_VLAN
5390         dev->vlan_rx_register = bnx2_vlan_rx_register;
5391         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5392 #endif
5393         dev->poll = bnx2_poll;
5394         dev->ethtool_ops = &bnx2_ethtool_ops;
5395         dev->weight = 64;
5396
5397         bp = dev->priv;
5398
5399 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5400         dev->poll_controller = poll_bnx2;
5401 #endif
5402
5403         if ((rc = register_netdev(dev))) {
5404                 printk(KERN_ERR PFX "Cannot register net device\n");
5405                 if (bp->regview)
5406                         iounmap(bp->regview);
5407                 pci_release_regions(pdev);
5408                 pci_disable_device(pdev);
5409                 pci_set_drvdata(pdev, NULL);
5410                 free_netdev(dev);
5411                 return rc;
5412         }
5413
5414         pci_set_drvdata(pdev, dev);
5415
5416         memcpy(dev->dev_addr, bp->mac_addr, 6);
5417         bp->name = board_info[ent->driver_data].name;
5418         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5419                 "IRQ %d, ",
5420                 dev->name,
5421                 bp->name,
5422                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5423                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5424                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5425                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5426                 bp->bus_speed_mhz,
5427                 dev->base_addr,
5428                 bp->pdev->irq);
5429
5430         printk("node addr ");
5431         for (i = 0; i < 6; i++)
5432                 printk("%2.2x", dev->dev_addr[i]);
5433         printk("\n");
5434
5435         dev->features |= NETIF_F_SG;
5436         if (bp->flags & USING_DAC_FLAG)
5437                 dev->features |= NETIF_F_HIGHDMA;
5438         dev->features |= NETIF_F_IP_CSUM;
5439 #ifdef BCM_VLAN
5440         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5441 #endif
5442 #ifdef BCM_TSO
5443         dev->features |= NETIF_F_TSO;
5444 #endif
5445
5446         netif_carrier_off(bp->dev);
5447
5448         return 0;
5449 }
5450
5451 static void __devexit
5452 bnx2_remove_one(struct pci_dev *pdev)
5453 {
5454         struct net_device *dev = pci_get_drvdata(pdev);
5455         struct bnx2 *bp = dev->priv;
5456
5457         unregister_netdev(dev);
5458
5459         if (bp->regview)
5460                 iounmap(bp->regview);
5461
5462         free_netdev(dev);
5463         pci_release_regions(pdev);
5464         pci_disable_device(pdev);
5465         pci_set_drvdata(pdev, NULL);
5466 }
5467
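/* Power management suspend: quiesce the NIC, tell the bootcode whether
 * Wake-on-LAN is requested, and drop to the requested PCI power state.
 */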
5468 static int
5469 bnx2_suspend(struct pci_dev *pdev, u32 state)
5470 {
5471         struct net_device *dev = pci_get_drvdata(pdev);
5472         struct bnx2 *bp = dev->priv;
5473         u32 reset_code;
5474
5475         if (!netif_running(dev))
5476                 return 0;
5477
5478         bnx2_netif_stop(bp);
5479         netif_device_detach(dev);
5480         del_timer_sync(&bp->timer);
5481         if (bp->wol)
5482                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5483         else
5484                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5485         bnx2_reset_chip(bp, reset_code);
5486         bnx2_free_skbs(bp);
5487         bnx2_set_power_state(bp, state);
5488         return 0;
5489 }
5490
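/* Power management resume: return to full power and re-initialize the NIC
 * if the interface was running at suspend time.
 */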
5491 static int
5492 bnx2_resume(struct pci_dev *pdev)
5493 {
5494         struct net_device *dev = pci_get_drvdata(pdev);
5495         struct bnx2 *bp = dev->priv;
5496
5497         if (!netif_running(dev))
5498                 return 0;
5499
5500         bnx2_set_power_state(bp, 0);
5501         netif_device_attach(dev);
5502         bnx2_init_nic(bp);
5503         bnx2_netif_start(bp);
5504         return 0;
5505 }
5506
5507 static struct pci_driver bnx2_pci_driver = {
5508         .name           = DRV_MODULE_NAME,
5509         .id_table       = bnx2_pci_tbl,
5510         .probe          = bnx2_init_one,
5511         .remove         = __devexit_p(bnx2_remove_one),
5512         .suspend        = bnx2_suspend,
5513         .resume         = bnx2_resume,
5514 };
5515
5516 static int __init bnx2_init(void)
5517 {
5518         return pci_module_init(&bnx2_pci_driver);
5519 }
5520
5521 static void __exit bnx2_cleanup(void)
5522 {
5523         pci_unregister_driver(&bnx2_pci_driver);
5524 }
5525
5526 module_init(bnx2_init);
5527 module_exit(bnx2_cleanup);
5528