/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.23"
#define DRV_MODULE_RELDATE      "2008/11/03"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
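/* Indirect GRC access: the target offset is latched into the
 * PCICFG_GRC_ADDRESS config-space window and the data is moved through
 * PCICFG_GRC_DATA; afterwards the window is parked at
 * PCICFG_VENDOR_ID_OFFSET, presumably so that an unrelated config cycle
 * cannot land on a live register (assumed rationale, not stated here).
 */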
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

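/* Host -> GRC copy over the driver's init DMAE channel (INIT_DMAE_C):
 * len32 dwords are moved from host memory at dma_addr to GRC offset
 * dst_addr.  Completion is detected by polling the slowpath wb_comp word,
 * which the DMAE engine writes back with DMAE_COMP_VAL.  Before the DMAE
 * engine is set up (!bp->dmae_ready) the copy falls back to indirect
 * register writes.
 */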
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

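/* GRC -> host counterpart: len32 dwords are read from GRC offset src_addr
 * into the slowpath wb_data buffer.  Only the first four dwords are
 * cleared and dumped below, which suggests callers read at most four
 * dwords through this path (an assumption, not enforced here).
 */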
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

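/* Scan the per-storm (X/T/C/U) assert lists in internal memory and dump
 * every entry whose first word is not the invalid-opcode marker; the
 * return value is the number of asserts found.
 */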
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

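/* Dump the MCP firmware's scratchpad log.  The mark read from scratch
 * offset 0xf104 looks like a GRC pointer into the scratch area (hence
 * the 0x08000000 bias below - an assumption, not documented here); the
 * text is printed in two passes, from mark to the end of the buffer and
 * then from the start of the buffer back up to mark.
 */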
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

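/* Enable HC interrupts.  In INTA mode the block is first programmed with
 * both the INT line and the MSI/MSIX path enabled, then rewritten with
 * the MSI/MSIX bit cleared; the intermediate write and its ordering are
 * kept as-is (assumed to be a HW programming requirement, the reason is
 * not documented in this file).
 */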
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

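/* Quiesce interrupts: raising intr_sem makes the ISRs return early,
 * optionally the HC is masked in HW as well, then every MSI-X vector
 * (or the single INTA line) is synchronized and the slowpath work is
 * cancelled.
 */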
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

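/* IGU acknowledge: pack sb id, storm id, index, interrupt mode and the
 * update flag into an igu_ack_register word and write it to this port's
 * HC command register.
 */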
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

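/* Reclaim completed TX packets: walk from the driver's sw consumer up to
 * the hw consumer taken from the status block, freeing each packet's BDs;
 * then, under the tx lock, re-wake the queue if start_xmit() stopped it
 * and enough BDs are available again.
 */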
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


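/* Slowpath (ramrod) completion handling.  The switch keys on
 * (command | state) so each ramrod completion is only accepted in the
 * state that issued it; completions for the non-leading (MULTI) queues
 * update the per-fastpath state, everything else updates bp->state.
 */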
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

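/* SGE mask bookkeeping: one bit per SGE entry, cleared when the FW
 * reports the page consumed.  The producer may only be advanced over
 * 64-entry mask words that are fully consumed, so it is pushed forward
 * word by word and each skipped word is reset to all ones.
 */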
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

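/* TPA start: the RX buffer that begins an aggregation is moved into the
 * per-queue tpa_pool bin (without unmapping it) and the bin's spare skb
 * takes its place on the RX ring at prod.
 */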
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }
                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

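/* TPA stop: unmap the aggregated skb from the bin, fix up its IP header
 * checksum (the FW is assumed to have rewritten the header while
 * coalescing - see the BNX2X_RX_SUM_FIX check in the caller), attach the
 * SGE pages as frags and hand the result to the stack; a freshly
 * allocated skb refills the bin, and on allocation failure the packet is
 * dropped instead.
 */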
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

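/* Main RX completion loop (bounded by the NAPI budget): slowpath CQEs are
 * routed to bnx2x_sp_event(), TPA start/end CQEs enter/leave the
 * aggregation path above, and everything else completes one BD as a
 * regular packet.
 */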
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                u16 queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

1456                                         if (!BNX2X_RX_SUM_FIX(cqe))
1457                                                 BNX2X_ERR("STOP on non-TCP "
1458                                                           "data\n");
1459
1460                                         /* This is the size of the linear
1461                                            data on this skb */
1462                                         len = le16_to_cpu(cqe->fast_path_cqe.
1463                                                                 len_on_bd);
1464                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1465                                                     len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1467                                         if (bp->panic)
1468                                                 return -EINVAL;
1469 #endif
1470
1471                                         bnx2x_update_sge_prod(fp,
1472                                                         &cqe->fast_path_cqe);
1473                                         goto next_cqe;
1474                                 }
1475                         }
1476
1477                         pci_dma_sync_single_for_device(bp->pdev,
1478                                         pci_unmap_addr(rx_buf, mapping),
1479                                                        pad + RX_COPY_THRESH,
1480                                                        PCI_DMA_FROMDEVICE);
1481                         prefetch(skb);
1482                         prefetch(((char *)(skb)) + 128);
1483
1484                         /* is this an error packet? (FALGS is spelled so in the headers) */
1485                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486                                 DP(NETIF_MSG_RX_ERR,
1487                                    "ERROR  flags %x  rx packet %u\n",
1488                                    cqe_fp_flags, sw_comp_cons);
1489                                 bp->eth_stats.rx_err_discard_pkt++;
1490                                 goto reuse_rx;
1491                         }
1492
1493                         /* Since we don't have a jumbo ring
1494                          * copy small packets if mtu > 1500
1495                          */
1496                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497                             (len <= RX_COPY_THRESH)) {
1498                                 struct sk_buff *new_skb;
1499
1500                                 new_skb = netdev_alloc_skb(bp->dev,
1501                                                            len + pad);
1502                                 if (new_skb == NULL) {
1503                                         DP(NETIF_MSG_RX_ERR,
1504                                            "ERROR  packet dropped "
1505                                            "because of alloc failure\n");
1506                                         bp->eth_stats.rx_skb_alloc_failed++;
1507                                         goto reuse_rx;
1508                                 }
1509
1510                                 /* aligned copy */
1511                                 skb_copy_from_linear_data_offset(skb, pad,
1512                                                     new_skb->data + pad, len);
1513                                 skb_reserve(new_skb, pad);
1514                                 skb_put(new_skb, len);
1515
1516                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1517
1518                                 skb = new_skb;
1519
1520                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521                                 pci_unmap_single(bp->pdev,
1522                                         pci_unmap_addr(rx_buf, mapping),
1523                                                  bp->rx_buf_size,
1524                                                  PCI_DMA_FROMDEVICE);
1525                                 skb_reserve(skb, pad);
1526                                 skb_put(skb, len);
1527
1528                         } else {
1529                                 DP(NETIF_MSG_RX_ERR,
1530                                    "ERROR  packet dropped because "
1531                                    "of alloc failure\n");
1532                                 bp->eth_stats.rx_skb_alloc_failed++;
1533 reuse_rx:
1534                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535                                 goto next_rx;
1536                         }
1537
1538                         skb->protocol = eth_type_trans(skb, bp->dev);
1539
1540                         skb->ip_summed = CHECKSUM_NONE;
1541                         if (bp->rx_csum) {
1542                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1544                                 else
1545                                         bp->eth_stats.hw_csum_err++;
1546                         }
1547                 }
1548
1549 #ifdef BCM_VLAN
1550                 if ((bp->vlgrp != NULL) &&
1551                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552                      PARSING_FLAGS_VLAN))
1553                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555                 else
1556 #endif
1557                         netif_receive_skb(skb);
1558
1559
1560 next_rx:
1561                 rx_buf->skb = NULL;
1562
1563                 bd_cons = NEXT_RX_IDX(bd_cons);
1564                 bd_prod = NEXT_RX_IDX(bd_prod);
1565                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1566                 rx_pkt++;
1567 next_cqe:
1568                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1569                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1570
1571                 if (rx_pkt == budget)
1572                         break;
1573         } /* while */
1574
1575         fp->rx_bd_cons = bd_cons;
1576         fp->rx_bd_prod = bd_prod_fw;
1577         fp->rx_comp_cons = sw_comp_cons;
1578         fp->rx_comp_prod = sw_comp_prod;
1579
1580         /* Update producers */
1581         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1582                              fp->rx_sge_prod);
1583         mmiowb(); /* keep prod updates ordered */
1584
1585         fp->rx_pkt += rx_pkt;
1586         fp->rx_calls++;
1587
1588         return rx_pkt;
1589 }
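/*
 * bnx2x_rx_int() above is the classic sw-consumer-chasing-hw-consumer
 * NAPI loop.  Stripped of the TPA and copy-break details, its control
 * flow is roughly as follows (NEXT_IDX() standing in for the
 * NEXT_RCQ_IDX/NEXT_RX_IDX macros, which also hop over the "next page"
 * element):
 *
 *	while (sw_cons != hw_cons) {
 *		process(ring[RING_IDX(sw_cons)]);
 *		sw_cons = NEXT_IDX(sw_cons);
 *		if (++work == budget)
 *			break;
 *	}
 *	fp->cons = sw_cons;		// publish progress to ourselves
 *	update_producers();		// and to the chip
 *
 * The rmb() before the loop is what makes it safe to read the CQE
 * contents after sampling hw_comp_cons from the status block.
 */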
1590
1591 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1592 {
1593         struct bnx2x_fastpath *fp = fp_cookie;
1594         struct bnx2x *bp = fp->bp;
1595         int index = FP_IDX(fp);
1596
1597         /* Return here if interrupt is disabled */
1598         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1599                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1600                 return IRQ_HANDLED;
1601         }
1602
1603         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1604            index, FP_SB_ID(fp));
1605         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1606
1607 #ifdef BNX2X_STOP_ON_ERROR
1608         if (unlikely(bp->panic))
1609                 return IRQ_HANDLED;
1610 #endif
1611
1612         prefetch(fp->rx_cons_sb);
1613         prefetch(fp->tx_cons_sb);
1614         prefetch(&fp->status_blk->c_status_block.status_block_index);
1615         prefetch(&fp->status_blk->u_status_block.status_block_index);
1616
1617         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1618
1619         return IRQ_HANDLED;
1620 }
1621
1622 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1623 {
1624         struct net_device *dev = dev_instance;
1625         struct bnx2x *bp = netdev_priv(dev);
1626         u16 status = bnx2x_ack_int(bp);
1627         u16 mask;
1628
1629         /* Return here if interrupt is shared and it's not for us */
1630         if (unlikely(status == 0)) {
1631                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1632                 return IRQ_NONE;
1633         }
1634         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1635
1636         /* Return here if interrupt is disabled */
1637         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1638                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1639                 return IRQ_HANDLED;
1640         }
1641
1642 #ifdef BNX2X_STOP_ON_ERROR
1643         if (unlikely(bp->panic))
1644                 return IRQ_HANDLED;
1645 #endif
1646
1647         mask = 0x2 << bp->fp[0].sb_id;
1648         if (status & mask) {
1649                 struct bnx2x_fastpath *fp = &bp->fp[0];
1650
1651                 prefetch(fp->rx_cons_sb);
1652                 prefetch(fp->tx_cons_sb);
1653                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1654                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1655
1656                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1657
1658                 status &= ~mask;
1659         }
1660
1661
1662         if (unlikely(status & 0x1)) {
1663                 schedule_work(&bp->sp_task);
1664
1665                 status &= ~0x1;
1666                 if (!status)
1667                         return IRQ_HANDLED;
1668         }
1669
1670         if (status)
1671                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1672                    status);
1673
1674         return IRQ_HANDLED;
1675 }
1676
1677 /* end of fast path */
1678
1679 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1680
1681 /* Link */
1682
1683 /*
1684  * General service functions
1685  */
1686
1687 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1688 {
1689         u32 lock_status;
1690         u32 resource_bit = (1 << resource);
1691         int func = BP_FUNC(bp);
1692         u32 hw_lock_control_reg;
1693         int cnt;
1694
1695         /* Validating that the resource is within range */
1696         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1697                 DP(NETIF_MSG_HW,
1698                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1699                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1700                 return -EINVAL;
1701         }
1702
1703         if (func <= 5) {
1704                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1705         } else {
1706                 hw_lock_control_reg =
1707                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1708         }
1709
1710         /* Validating that the resource is not already taken */
1711         lock_status = REG_RD(bp, hw_lock_control_reg);
1712         if (lock_status & resource_bit) {
1713                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1714                    lock_status, resource_bit);
1715                 return -EEXIST;
1716         }
1717
1718         /* Try for 5 seconds, polling every 5ms */
1719         for (cnt = 0; cnt < 1000; cnt++) {
1720                 /* Try to acquire the lock */
1721                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1722                 lock_status = REG_RD(bp, hw_lock_control_reg);
1723                 if (lock_status & resource_bit)
1724                         return 0;
1725
1726                 msleep(5);
1727         }
1728         DP(NETIF_MSG_HW, "Timeout\n");
1729         return -EAGAIN;
1730 }
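/*
 * The acquire scheme above is "write your bit to the set register, read
 * the status register back, and you own the resource only if your bit
 * stuck".  In outline (ctrl_reg + 4 being the set register, as used
 * above):
 *
 *	for (cnt = 0; cnt < 1000; cnt++) {
 *		REG_WR(bp, ctrl_reg + 4, bit);	// request the resource
 *		if (REG_RD(bp, ctrl_reg) & bit)
 *			return 0;		// won it
 *		msleep(5);			// lost; retry for up to ~5s
 *	}
 *
 * A losing contender never sees its bit appear and eventually gives up
 * with -EAGAIN.
 */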
1731
1732 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1733 {
1734         u32 lock_status;
1735         u32 resource_bit = (1 << resource);
1736         int func = BP_FUNC(bp);
1737         u32 hw_lock_control_reg;
1738
1739         /* Validating that the resource is within range */
1740         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1741                 DP(NETIF_MSG_HW,
1742                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1743                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1744                 return -EINVAL;
1745         }
1746
1747         if (func <= 5) {
1748                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1749         } else {
1750                 hw_lock_control_reg =
1751                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1752         }
1753
1754         /* Validating that the resource is currently taken */
1755         lock_status = REG_RD(bp, hw_lock_control_reg);
1756         if (!(lock_status & resource_bit)) {
1757                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1758                    lock_status, resource_bit);
1759                 return -EFAULT;
1760         }
1761
1762         REG_WR(bp, hw_lock_control_reg, resource_bit);
1763         return 0;
1764 }
1765
1766 /* HW Lock for shared dual port PHYs */
1767 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1768 {
1769         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1770
1771         mutex_lock(&bp->port.phy_mutex);
1772
1773         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1774             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1775                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1776 }
1777
1778 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1779 {
1780         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1781
1782         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1783             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1784                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1785
1786         mutex_unlock(&bp->port.phy_mutex);
1787 }
1788
1789 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1790 {
1791         /* The GPIO should be swapped if swap register is set and active */
1792         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1793                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1794         int gpio_shift = gpio_num +
1795                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1796         u32 gpio_mask = (1 << gpio_shift);
1797         u32 gpio_reg;
1798
1799         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1800                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1801                 return -EINVAL;
1802         }
1803
1804         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1805         /* read GPIO and mask out everything but the float bits */
1806         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1807
1808         switch (mode) {
1809         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1810                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1811                    gpio_num, gpio_shift);
1812                 /* clear FLOAT and set CLR */
1813                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1814                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1815                 break;
1816
1817         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1818                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1819                    gpio_num, gpio_shift);
1820                 /* clear FLOAT and set SET */
1821                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1822                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1823                 break;
1824
1825         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1826                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1827                    gpio_num, gpio_shift);
1828                 /* set FLOAT */
1829                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1830                 break;
1831
1832         default:
1833                 break;
1834         }
1835
1836         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1837         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1838
1839         return 0;
1840 }
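/*
 * The GPIO register packs several per-pin banks (FLOAT, CLR, SET) at
 * different bit positions, so driving a pin is always a read-modify-write
 * of the shifted mask.  For example, to drive pin n low (using the same
 * *_POS constants as above):
 *
 *	reg &= ~(BIT(n) << MISC_REGISTERS_GPIO_FLOAT_POS);	// take control
 *	reg |=  (BIT(n) << MISC_REGISTERS_GPIO_CLR_POS);	// drive low
 *
 * A pin left in FLOAT ignores its SET/CLR bits, which is presumably why
 * the code above always clears FLOAT before driving an output level.
 */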
1841
1842 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1843 {
1844         u32 spio_mask = (1 << spio_num);
1845         u32 spio_reg;
1846
1847         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1848             (spio_num > MISC_REGISTERS_SPIO_7)) {
1849                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1850                 return -EINVAL;
1851         }
1852
1853         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1854         /* read SPIO and mask out everything but the float bits */
1855         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1856
1857         switch (mode) {
1858         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1859                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1860                 /* clear FLOAT and set CLR */
1861                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1862                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1863                 break;
1864
1865         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1866                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1867                 /* clear FLOAT and set SET */
1868                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1869                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1870                 break;
1871
1872         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1873                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1874                 /* set FLOAT */
1875                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1876                 break;
1877
1878         default:
1879                 break;
1880         }
1881
1882         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1883         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1884
1885         return 0;
1886 }
1887
1888 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1889 {
1890         switch (bp->link_vars.ieee_fc) {
1891         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1892                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1893                                           ADVERTISED_Pause);
1894                 break;
1895         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1896                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1897                                          ADVERTISED_Pause);
1898                 break;
1899         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1900                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1901                 break;
1902         default:
1903                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1904                                           ADVERTISED_Pause);
1905                 break;
1906         }
1907 }
1908
1909 static void bnx2x_link_report(struct bnx2x *bp)
1910 {
1911         if (bp->link_vars.link_up) {
1912                 if (bp->state == BNX2X_STATE_OPEN)
1913                         netif_carrier_on(bp->dev);
1914                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1915
1916                 printk("%d Mbps ", bp->link_vars.line_speed);
1917
1918                 if (bp->link_vars.duplex == DUPLEX_FULL)
1919                         printk("full duplex");
1920                 else
1921                         printk("half duplex");
1922
1923                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1924                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1925                                 printk(", receive ");
1926                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1927                                         printk("& transmit ");
1928                         } else {
1929                                 printk(", transmit ");
1930                         }
1931                         printk("flow control ON");
1932                 }
1933                 printk("\n");
1934
1935         } else { /* link_down */
1936                 netif_carrier_off(bp->dev);
1937                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1938         }
1939 }
1940
1941 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1942 {
1943         if (!BP_NOMCP(bp)) {
1944                 u8 rc;
1945
1946                 /* Initialize link parameters structure variables */
1947                 /* It is recommended to turn off RX FC for jumbo frames
1948                    for better performance */
1949                 if (IS_E1HMF(bp))
1950                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1951                 else if (bp->dev->mtu > 5000)
1952                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1953                 else
1954                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1955
1956                 bnx2x_acquire_phy_lock(bp);
1957                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1958                 bnx2x_release_phy_lock(bp);
1959
1960                 if (bp->link_vars.link_up)
1961                         bnx2x_link_report(bp);
1962
1963                 bnx2x_calc_fc_adv(bp);
1964
1965                 return rc;
1966         }
1967         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1968         return -EINVAL;
1969 }
1970
1971 static void bnx2x_link_set(struct bnx2x *bp)
1972 {
1973         if (!BP_NOMCP(bp)) {
1974                 bnx2x_acquire_phy_lock(bp);
1975                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1976                 bnx2x_release_phy_lock(bp);
1977
1978                 bnx2x_calc_fc_adv(bp);
1979         } else
1980                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1981 }
1982
1983 static void bnx2x__link_reset(struct bnx2x *bp)
1984 {
1985         if (!BP_NOMCP(bp)) {
1986                 bnx2x_acquire_phy_lock(bp);
1987                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1988                 bnx2x_release_phy_lock(bp);
1989         } else
1990                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1991 }
1992
1993 static u8 bnx2x_link_test(struct bnx2x *bp)
1994 {
1995         u8 rc;
1996
1997         bnx2x_acquire_phy_lock(bp);
1998         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1999         bnx2x_release_phy_lock(bp);
2000
2001         return rc;
2002 }
2003
2004 /* Calculates the sum of vn_min_rates.
2005    It's needed for further normalizing of the min_rates.
2006
2007    Returns:
2008      sum of vn_min_rates
2009        or
2010      0 - if all the min_rates are 0.
2011      In the later case fairness algorithm should be deactivated.
2012      In the latter case the fairness algorithm should be deactivated.
2013      If not all min_rates are zero, then those that are zero will
2014      be set to 1.
2015 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2016 {
2017         int i, port = BP_PORT(bp);
2018         u32 wsum = 0;
2019         int all_zero = 1;
2020
2021         for (i = 0; i < E1HVN_MAX; i++) {
2022                 u32 vn_cfg =
2023                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2024                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2025                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2026                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2027                         /* If min rate is zero - set it to 1 */
2028                         if (!vn_min_rate)
2029                                 vn_min_rate = DEF_MIN_RATE;
2030                         else
2031                                 all_zero = 0;
2032
2033                         wsum += vn_min_rate;
2034                 }
2035         }
2036
2037         /* only if all the min rates are zero - disable FAIRNESS */
2038         if (all_zero)
2039                 return 0;
2040
2041         return wsum;
2042 }
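/*
 * Worked example: suppose the (non-hidden) VNs' scaled min rates come
 * out as {0, 2500, 0, 7500} (the MIN_BW field is multiplied by 100
 * above).  The two zero entries are bumped to DEF_MIN_RATE before
 * summing, giving wsum = 2 * DEF_MIN_RATE + 2500 + 7500.  Only when
 * every entry is zero does the function return 0, which switches
 * fairness off entirely.
 */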
2043
2044 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2045                                    int en_fness,
2046                                    u16 port_rate,
2047                                    struct cmng_struct_per_port *m_cmng_port)
2048 {
2049         u32 r_param = port_rate / 8;
2050         int port = BP_PORT(bp);
2051         int i;
2052
2053         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2054
2055         /* Enable minmax only if we are in e1hmf mode */
2056         if (IS_E1HMF(bp)) {
2057                 u32 fair_periodic_timeout_usec;
2058                 u32 t_fair;
2059
2060                 /* Enable rate shaping and fairness */
2061                 m_cmng_port->flags.cmng_vn_enable = 1;
2062                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2063                 m_cmng_port->flags.rate_shaping_enable = 1;
2064
2065                 if (!en_fness)
2066                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2067                            "  fairness will be disabled\n");
2068
2069                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2070                 m_cmng_port->rs_vars.rs_periodic_timeout =
2071                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2072
2073                 /* this is the threshold below which no timer arming will occur;
2074                    the 1.25 coefficient makes the threshold a little bigger
2075                    than the real time, to compensate for timer inaccuracy */
2076                 m_cmng_port->rs_vars.rs_threshold =
2077                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2078
2079                 /* resolution of fairness timer */
2080                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2081                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2082                 t_fair = T_FAIR_COEF / port_rate;
2083
2084                 /* this is the threshold below which we won't arm
2085                    the timer anymore */
2086                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2087
2088                 /* we multiply by 1e3/8 to get bytes/msec.
2089                    We don't want the credits to exceed
2090                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2091                 m_cmng_port->fair_vars.upper_bound =
2092                                                 r_param * t_fair * FAIR_MEM;
2093                 /* since each tick is 4 usec */
2094                 m_cmng_port->fair_vars.fairness_timeout =
2095                                                 fair_periodic_timeout_usec / 4;
2096
2097         } else {
2098                 /* Disable rate shaping and fairness */
2099                 m_cmng_port->flags.cmng_vn_enable = 0;
2100                 m_cmng_port->flags.fairness_enable = 0;
2101                 m_cmng_port->flags.rate_shaping_enable = 0;
2102
2103                 DP(NETIF_MSG_IFUP,
2104                    "Single function mode  minmax will be disabled\n");
2105         }
2106
2107         /* Store it to internal memory */
2108         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2109                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2110                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2111                        ((u32 *)(m_cmng_port))[i]);
2112 }
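/*
 * Sizing example for a 10G port (port_rate = 10000 Mbps):
 *
 *	r_param      = 10000 / 8 = 1250 bytes/usec
 *	rs_threshold = (RS_PERIODIC_TIMEOUT_USEC * 1250 * 5) / 4
 *	t_fair       = T_FAIR_COEF / 10000, i.e. 1000 usec per the
 *	               comment above (10000 usec on a 1G link)
 *
 * and the two timeouts are divided by 4 on their way into the chip to
 * convert usec into 4-usec SDM ticks.
 */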
2113
2114 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2115                                    u32 wsum, u16 port_rate,
2116                                  struct cmng_struct_per_port *m_cmng_port)
2117 {
2118         struct rate_shaping_vars_per_vn m_rs_vn;
2119         struct fairness_vars_per_vn m_fair_vn;
2120         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2121         u16 vn_min_rate, vn_max_rate;
2122         int i;
2123
2124         /* If function is hidden - set min and max to zeroes */
2125         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2126                 vn_min_rate = 0;
2127                 vn_max_rate = 0;
2128
2129         } else {
2130                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2131                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2132                 /* If FAIRNESS is enabled (not all min rates are zero) and
2133                    the current min rate is zero - set it to 1.
2134                    This is a requirement of the algorithm. */
2135                 if ((vn_min_rate == 0) && wsum)
2136                         vn_min_rate = DEF_MIN_RATE;
2137                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2138                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2139         }
2140
2141         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2142            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2143
2144         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2145         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2146
2147         /* global vn counter - maximal Mbps for this vn */
2148         m_rs_vn.vn_counter.rate = vn_max_rate;
2149
2150         /* quota - number of bytes transmitted in this period */
2151         m_rs_vn.vn_counter.quota =
2152                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2153
2154 #ifdef BNX2X_PER_PROT_QOS
2155         /* per protocol counter */
2156         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2157                 /* maximal Mbps for this protocol */
2158                 m_rs_vn.protocol_counters[protocol].rate =
2159                                                 protocol_max_rate[protocol];
2160                 /* the quota in each timer period -
2161                    number of bytes transmitted in this period */
2162                 m_rs_vn.protocol_counters[protocol].quota =
2163                         (u32)(rs_periodic_timeout_usec *
2164                           ((double)m_rs_vn.
2165                                    protocol_counters[protocol].rate/8));
2166         }
2167 #endif
2168
2169         if (wsum) {
2170                 /* credit for each period of the fairness algorithm:
2171                    number of bytes in T_FAIR (the VNs share the port rate).
2172                    wsum should not be larger than 10000, thus
2173                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2174                 m_fair_vn.vn_credit_delta =
2175                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2176                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2177                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2178                    m_fair_vn.vn_credit_delta);
2179         }
2180
2181 #ifdef BNX2X_PER_PROT_QOS
2182         do {
2183                 u32 protocolWeightSum = 0;
2184
2185                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2186                         protocolWeightSum +=
2187                                         drvInit.protocol_min_rate[protocol];
2188                 /* per protocol counter -
2189                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2190                 if (protocolWeightSum > 0) {
2191                         for (protocol = 0;
2192                              protocol < NUM_OF_PROTOCOLS; protocol++)
2193                                 /* credit for each period of the
2194                                    fairness algorithm - number of bytes in
2195                                    T_FAIR (the protocol share the vn rate) */
2196                                 m_fair_vn.protocol_credit_delta[protocol] =
2197                                         (u32)((vn_min_rate / 8) * t_fair *
2198                                         protocol_min_rate / protocolWeightSum);
2199                 }
2200         } while (0);
2201 #endif
2202
2203         /* Store it to internal memory */
2204         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2205                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2206                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2207                        ((u32 *)(&m_rs_vn))[i]);
2208
2209         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2210                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2211                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2212                        ((u32 *)(&m_fair_vn))[i]);
2213 }
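/*
 * Fairness credit example for the max() above: with the documented
 * upper bound wsum = 10000 and a VN whose vn_min_rate = 2500, the
 * per-period credit is 2500 * (T_FAIR_COEF / (8 * 10000)) bytes; the
 * max() against 2 * fair_threshold keeps the delta from collapsing for
 * VNs with a very small share.
 */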
2214
2215 /* This function is called upon link interrupt */
2216 static void bnx2x_link_attn(struct bnx2x *bp)
2217 {
2218         int vn;
2219
2220         /* Make sure that we are synced with the current statistics */
2221         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2222
2223         bnx2x_acquire_phy_lock(bp);
2224         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2225         bnx2x_release_phy_lock(bp);
2226
2227         if (bp->link_vars.link_up) {
2228
2229                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2230                         struct host_port_stats *pstats;
2231
2232                         pstats = bnx2x_sp(bp, port_stats);
2233                         /* reset old bmac stats */
2234                         memset(&(pstats->mac_stx[0]), 0,
2235                                sizeof(struct mac_stx));
2236                 }
2237                 if ((bp->state == BNX2X_STATE_OPEN) ||
2238                     (bp->state == BNX2X_STATE_DISABLED))
2239                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2240         }
2241
2242         /* indicate link status */
2243         bnx2x_link_report(bp);
2244
2245         if (IS_E1HMF(bp)) {
2246                 int func;
2247
2248                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2249                         if (vn == BP_E1HVN(bp))
2250                                 continue;
2251
2252                         func = ((vn << 1) | BP_PORT(bp));
2253
2254                         /* Set the attention towards other drivers
2255                            on the same port */
2256                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2257                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2258                 }
2259         }
2260
2261         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2262                 struct cmng_struct_per_port m_cmng_port;
2263                 u32 wsum;
2264                 int port = BP_PORT(bp);
2265
2266                 /* Init RATE SHAPING and FAIRNESS contexts */
2267                 wsum = bnx2x_calc_vn_wsum(bp);
2268                 bnx2x_init_port_minmax(bp, (int)wsum,
2269                                         bp->link_vars.line_speed,
2270                                         &m_cmng_port);
2271                 if (IS_E1HMF(bp))
2272                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2273                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2274                                         wsum, bp->link_vars.line_speed,
2275                                                      &m_cmng_port);
2276         }
2277 }
2278
2279 static void bnx2x__link_status_update(struct bnx2x *bp)
2280 {
2281         if (bp->state != BNX2X_STATE_OPEN)
2282                 return;
2283
2284         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2285
2286         if (bp->link_vars.link_up)
2287                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2288         else
2289                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2290
2291         /* indicate link status */
2292         bnx2x_link_report(bp);
2293 }
2294
2295 static void bnx2x_pmf_update(struct bnx2x *bp)
2296 {
2297         int port = BP_PORT(bp);
2298         u32 val;
2299
2300         bp->port.pmf = 1;
2301         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2302
2303         /* enable nig attention */
2304         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2305         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2306         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2307
2308         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2309 }
2310
2311 /* end of Link */
2312
2313 /* slow path */
2314
2315 /*
2316  * General service functions
2317  */
2318
2319 /* the slow path queue is odd since completions arrive on the fastpath ring */
2320 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2321                          u32 data_hi, u32 data_lo, int common)
2322 {
2323         int func = BP_FUNC(bp);
2324
2325         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2326            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2327            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2328            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2329            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2330
2331 #ifdef BNX2X_STOP_ON_ERROR
2332         if (unlikely(bp->panic))
2333                 return -EIO;
2334 #endif
2335
2336         spin_lock_bh(&bp->spq_lock);
2337
2338         if (!bp->spq_left) {
2339                 BNX2X_ERR("BUG! SPQ ring full!\n");
2340                 spin_unlock_bh(&bp->spq_lock);
2341                 bnx2x_panic();
2342                 return -EBUSY;
2343         }
2344
2345         /* CID needs the port number to be encoded in it */
2346         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2347                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2348                                      HW_CID(bp, cid)));
2349         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2350         if (common)
2351                 bp->spq_prod_bd->hdr.type |=
2352                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2353
2354         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2355         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2356
2357         bp->spq_left--;
2358
2359         if (bp->spq_prod_bd == bp->spq_last_bd) {
2360                 bp->spq_prod_bd = bp->spq;
2361                 bp->spq_prod_idx = 0;
2362                 DP(NETIF_MSG_TIMER, "end of spq\n");
2363
2364         } else {
2365                 bp->spq_prod_bd++;
2366                 bp->spq_prod_idx++;
2367         }
2368
2369         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2370                bp->spq_prod_idx);
2371
2372         spin_unlock_bh(&bp->spq_lock);
2373         return 0;
2374 }
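/*
 * The SPQ producer wraps explicitly rather than with a power-of-two
 * mask.  The bookkeeping above boils down to:
 *
 *	if (prod_bd == last_bd) {	// used the last BD on the page
 *		prod_bd = spq_base;	// wrap to the first BD
 *		prod_idx = 0;
 *	} else {
 *		prod_bd++;
 *		prod_idx++;
 *	}
 *	REG_WR(bp, prod_offset, prod_idx);	// ring the doorbell
 *
 * (prod_offset stands in for the XSTORM_SPQ_PROD_OFFSET write above.)
 * spq_left is the only guard against overrunning unconsumed entries,
 * which is why running it down to zero is treated as a driver bug.
 */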
2375
2376 /* acquire split MCP access lock register */
2377 static int bnx2x_acquire_alr(struct bnx2x *bp)
2378 {
2379         u32 i, j, val;
2380         int rc = 0;
2381
2382         might_sleep();
2383         i = 100;
2384         for (j = 0; j < i*10; j++) {
2385                 val = (1UL << 31);
2386                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2387                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2388                 if (val & (1L << 31))
2389                         break;
2390
2391                 msleep(5);
2392         }
2393         if (!(val & (1L << 31))) {
2394                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2395                 rc = -EBUSY;
2396         }
2397
2398         return rc;
2399 }
2400
2401 /* release split MCP access lock register */
2402 static void bnx2x_release_alr(struct bnx2x *bp)
2403 {
2404         u32 val = 0;
2405
2406         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2407 }
2408
2409 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2410 {
2411         struct host_def_status_block *def_sb = bp->def_status_blk;
2412         u16 rc = 0;
2413
2414         barrier(); /* status block is written to by the chip */
2415         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2416                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2417                 rc |= 1;
2418         }
2419         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2420                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2421                 rc |= 2;
2422         }
2423         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2424                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2425                 rc |= 4;
2426         }
2427         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2428                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2429                 rc |= 8;
2430         }
2431         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2432                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2433                 rc |= 16;
2434         }
2435         return rc;
2436 }
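/*
 * The return value is a bitmask of which status-block indices moved:
 * bit 0 = attention bits, bit 1 = CStorm, bit 2 = UStorm, bit 3 =
 * XStorm, bit 4 = TStorm.  Callers demultiplex only the bits they care
 * about, as bnx2x_sp_task() does below:
 *
 *	u16 status = bnx2x_update_dsb_idx(bp);
 *	if (status & 0x1)	// attention index changed
 *		bnx2x_attn_int(bp);
 */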
2437
2438 /*
2439  * slow path service functions
2440  */
2441
2442 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2443 {
2444         int port = BP_PORT(bp);
2445         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2446                        COMMAND_REG_ATTN_BITS_SET);
2447         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2448                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2449         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2450                                        NIG_REG_MASK_INTERRUPT_PORT0;
2451         u32 aeu_mask;
2452
2453         if (bp->attn_state & asserted)
2454                 BNX2X_ERR("IGU ERROR\n");
2455
2456         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2457         aeu_mask = REG_RD(bp, aeu_addr);
2458
2459         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2460            aeu_mask, asserted);
2461         aeu_mask &= ~(asserted & 0xff);
2462         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2463
2464         REG_WR(bp, aeu_addr, aeu_mask);
2465         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2466
2467         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2468         bp->attn_state |= asserted;
2469         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2470
2471         if (asserted & ATTN_HARD_WIRED_MASK) {
2472                 if (asserted & ATTN_NIG_FOR_FUNC) {
2473
2474                         /* save nig interrupt mask */
2475                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2476                         REG_WR(bp, nig_int_mask_addr, 0);
2477
2478                         bnx2x_link_attn(bp);
2479
2480                         /* handle unicore attn? */
2481                 }
2482                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2483                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2484
2485                 if (asserted & GPIO_2_FUNC)
2486                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2487
2488                 if (asserted & GPIO_3_FUNC)
2489                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2490
2491                 if (asserted & GPIO_4_FUNC)
2492                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2493
2494                 if (port == 0) {
2495                         if (asserted & ATTN_GENERAL_ATTN_1) {
2496                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2497                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2498                         }
2499                         if (asserted & ATTN_GENERAL_ATTN_2) {
2500                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2501                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2502                         }
2503                         if (asserted & ATTN_GENERAL_ATTN_3) {
2504                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2505                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2506                         }
2507                 } else {
2508                         if (asserted & ATTN_GENERAL_ATTN_4) {
2509                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2510                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2511                         }
2512                         if (asserted & ATTN_GENERAL_ATTN_5) {
2513                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2514                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2515                         }
2516                         if (asserted & ATTN_GENERAL_ATTN_6) {
2517                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2518                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2519                         }
2520                 }
2521
2522         } /* if hardwired */
2523
2524         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2525            asserted, hc_addr);
2526         REG_WR(bp, hc_addr, asserted);
2527
2528         /* now set back the mask */
2529         if (asserted & ATTN_NIG_FOR_FUNC)
2530                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2531 }
2532
2533 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2534 {
2535         int port = BP_PORT(bp);
2536         int reg_offset;
2537         u32 val;
2538
2539         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2540                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2541
2542         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2543
2544                 val = REG_RD(bp, reg_offset);
2545                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2546                 REG_WR(bp, reg_offset, val);
2547
2548                 BNX2X_ERR("SPIO5 hw attention\n");
2549
2550                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2551                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2552                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2553                         /* Fan failure attention */
2554
2555                         /* The PHY reset is controlled by GPIO 1 */
2556                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2557                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2558                         /* Low power mode is controlled by GPIO 2 */
2559                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2560                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2561                         /* mark the failure */
2562                         bp->link_params.ext_phy_config &=
2563                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2564                         bp->link_params.ext_phy_config |=
2565                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2566                         SHMEM_WR(bp,
2567                                  dev_info.port_hw_config[port].
2568                                                         external_phy_config,
2569                                  bp->link_params.ext_phy_config);
2570                         /* log the failure */
2571                         printk(KERN_ERR PFX "Fan Failure on Network"
2572                                " Controller %s has caused the driver to"
2573                                " shutdown the card to prevent permanent"
2574                                " damage.  Please contact Dell Support for"
2575                                " assistance\n", bp->dev->name);
2576                         break;
2577
2578                 default:
2579                         break;
2580                 }
2581         }
2582
2583         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2584
2585                 val = REG_RD(bp, reg_offset);
2586                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2587                 REG_WR(bp, reg_offset, val);
2588
2589                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2590                           (attn & HW_INTERRUT_ASSERT_SET_0));
2591                 bnx2x_panic();
2592         }
2593 }
2594
2595 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2596 {
2597         u32 val;
2598
2599         if (attn & BNX2X_DOORQ_ASSERT) {
2600
2601                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2602                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2603                 /* DORQ discard attention */
2604                 if (val & 0x2)
2605                         BNX2X_ERR("FATAL error from DORQ\n");
2606         }
2607
2608         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2609
2610                 int port = BP_PORT(bp);
2611                 int reg_offset;
2612
2613                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2614                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2615
2616                 val = REG_RD(bp, reg_offset);
2617                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2618                 REG_WR(bp, reg_offset, val);
2619
2620                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2621                           (attn & HW_INTERRUT_ASSERT_SET_1));
2622                 bnx2x_panic();
2623         }
2624 }
2625
2626 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2627 {
2628         u32 val;
2629
2630         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2631
2632                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2633                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2634                 /* CFC error attention */
2635                 if (val & 0x2)
2636                         BNX2X_ERR("FATAL error from CFC\n");
2637         }
2638
2639         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2640
2641                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2642                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2643                 /* RQ_USDMDP_FIFO_OVERFLOW */
2644                 if (val & 0x18000)
2645                         BNX2X_ERR("FATAL error from PXP\n");
2646         }
2647
2648         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2649
2650                 int port = BP_PORT(bp);
2651                 int reg_offset;
2652
2653                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2654                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2655
2656                 val = REG_RD(bp, reg_offset);
2657                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2658                 REG_WR(bp, reg_offset, val);
2659
2660                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2661                           (attn & HW_INTERRUT_ASSERT_SET_2));
2662                 bnx2x_panic();
2663         }
2664 }
2665
2666 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2667 {
2668         u32 val;
2669
2670         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2671
2672                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2673                         int func = BP_FUNC(bp);
2674
2675                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2676                         bnx2x__link_status_update(bp);
2677                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2678                                                         DRV_STATUS_PMF)
2679                                 bnx2x_pmf_update(bp);
2680
2681                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2682
2683                         BNX2X_ERR("MC assert!\n");
2684                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2685                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2686                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2688                         bnx2x_panic();
2689
2690                 } else if (attn & BNX2X_MCP_ASSERT) {
2691
2692                         BNX2X_ERR("MCP assert!\n");
2693                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2694                         bnx2x_fw_dump(bp);
2695
2696                 } else
2697                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2698         }
2699
2700         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2701                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2702                 if (attn & BNX2X_GRC_TIMEOUT) {
2703                         val = CHIP_IS_E1H(bp) ?
2704                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2705                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2706                 }
2707                 if (attn & BNX2X_GRC_RSV) {
2708                         val = CHIP_IS_E1H(bp) ?
2709                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2710                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2711                 }
2712                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2713         }
2714 }
2715
2716 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2717 {
2718         struct attn_route attn;
2719         struct attn_route group_mask;
2720         int port = BP_PORT(bp);
2721         int index;
2722         u32 reg_addr;
2723         u32 val;
2724         u32 aeu_mask;
2725
2726         /* need to take HW lock because MCP or other port might also
2727            try to handle this event */
2728         bnx2x_acquire_alr(bp);
2729
2730         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2731         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2732         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2733         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2734         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2735            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2736
2737         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2738                 if (deasserted & (1 << index)) {
2739                         group_mask = bp->attn_group[index];
2740
2741                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2742                            index, group_mask.sig[0], group_mask.sig[1],
2743                            group_mask.sig[2], group_mask.sig[3]);
2744
2745                         bnx2x_attn_int_deasserted3(bp,
2746                                         attn.sig[3] & group_mask.sig[3]);
2747                         bnx2x_attn_int_deasserted1(bp,
2748                                         attn.sig[1] & group_mask.sig[1]);
2749                         bnx2x_attn_int_deasserted2(bp,
2750                                         attn.sig[2] & group_mask.sig[2]);
2751                         bnx2x_attn_int_deasserted0(bp,
2752                                         attn.sig[0] & group_mask.sig[0]);
2753
2754                         if ((attn.sig[0] & group_mask.sig[0] &
2755                                                 HW_PRTY_ASSERT_SET_0) ||
2756                             (attn.sig[1] & group_mask.sig[1] &
2757                                                 HW_PRTY_ASSERT_SET_1) ||
2758                             (attn.sig[2] & group_mask.sig[2] &
2759                                                 HW_PRTY_ASSERT_SET_2))
2760                                 BNX2X_ERR("FATAL HW block parity attention\n");
2761                 }
2762         }
2763
2764         bnx2x_release_alr(bp);
2765
2766         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2767
2768         val = ~deasserted;
2769         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2770            val, reg_addr);
2771         REG_WR(bp, reg_addr, val);
2772
2773         if (~bp->attn_state & deasserted)
2774                 BNX2X_ERR("IGU ERROR\n");
2775
2776         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2777                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2778
2779         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2780         aeu_mask = REG_RD(bp, reg_addr);
2781
2782         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2783            aeu_mask, deasserted);
2784         aeu_mask |= (deasserted & 0xff);
2785         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2786
2787         REG_WR(bp, reg_addr, aeu_mask);
2788         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2789
2790         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2791         bp->attn_state &= ~deasserted;
2792         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2793 }
2794
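/*
 * Attention bookkeeping used below: a bit is "asserted" when the
 * status block reports it set while it is neither acked nor tracked
 * in attn_state, and "deasserted" when the status block has cleared
 * it while it is still acked and tracked.  Illustrative example:
 * attn_bits = 0101b with attn_ack = attn_state = 0001b gives
 * asserted = 0100b and deasserted = 0000b.
 */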
2795 static void bnx2x_attn_int(struct bnx2x *bp)
2796 {
2797         /* read local copy of bits */
2798         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2799         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2800         u32 attn_state = bp->attn_state;
2801
2802         /* look for changed bits */
2803         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2804         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2805
2806         DP(NETIF_MSG_HW,
2807            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2808            attn_bits, attn_ack, asserted, deasserted);
2809
2810         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2811                 BNX2X_ERR("BAD attention state\n");
2812
2813         /* handle bits that were raised */
2814         if (asserted)
2815                 bnx2x_attn_int_asserted(bp, asserted);
2816
2817         if (deasserted)
2818                 bnx2x_attn_int_deasserted(bp, deasserted);
2819 }
2820
2821 static void bnx2x_sp_task(struct work_struct *work)
2822 {
2823         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2824         u16 status;
2825
2826
2827         /* Return here if interrupt is disabled */
2828         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2829                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2830                 return;
2831         }
2832
2833         status = bnx2x_update_dsb_idx(bp);
2834 /*      if (status == 0)                                     */
2835 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2836
2837         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2838
2839         /* HW attentions */
2840         if (status & 0x1)
2841                 bnx2x_attn_int(bp);
2842
2843         /* CStorm events: query_stats, port delete ramrod */
2844         if (status & 0x2)
2845                 bp->stats_pending = 0;
2846
2847         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2848                      IGU_INT_NOP, 1);
2849         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2850                      IGU_INT_NOP, 1);
2851         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2852                      IGU_INT_NOP, 1);
2853         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2854                      IGU_INT_NOP, 1);
2855         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2856                      IGU_INT_ENABLE, 1);
2857
2858 }
2859
2860 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2861 {
2862         struct net_device *dev = dev_instance;
2863         struct bnx2x *bp = netdev_priv(dev);
2864
2865         /* Return here if interrupt is disabled */
2866         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2867                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2868                 return IRQ_HANDLED;
2869         }
2870
2871         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2872
2873 #ifdef BNX2X_STOP_ON_ERROR
2874         if (unlikely(bp->panic))
2875                 return IRQ_HANDLED;
2876 #endif
2877
2878         schedule_work(&bp->sp_task);
2879
2880         return IRQ_HANDLED;
2881 }
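/*
 * The slowpath ISR itself only acks with IGU_INT_DISABLE and defers
 * the real work to bnx2x_sp_task() above; the final ack in
 * bnx2x_sp_task() (IGU_INT_ENABLE) re-arms the interrupt.
 */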
2882
2883 /* end of slow path */
2884
2885 /* Statistics */
2886
2887 /****************************************************************************
2888 * Macros
2889 ****************************************************************************/
2890
2891 /* sum[hi:lo] += add[hi:lo] */
2892 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2893         do { \
2894                 s_lo += a_lo; \
2895                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2896         } while (0)
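/*
 * Illustrative example: ADD_64(hi, 0, lo, 1) on {hi, lo} =
 * {0, 0xffffffff} wraps lo to 0; the new s_lo is then below a_lo, so
 * a carry of 1 propagates into hi, giving {1, 0}.
 */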
2897
2898 /* difference = minuend - subtrahend */
2899 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2900         do { \
2901                 if (m_lo < s_lo) { \
2902                         /* underflow */ \
2903                         d_hi = m_hi - s_hi; \
2904                         if (d_hi > 0) { \
2905                                 /* we can 'loan' 1 */ \
2906                                 d_hi--; \
2907                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2908                         } else { \
2909                                 /* m_hi <= s_hi */ \
2910                                 d_hi = 0; \
2911                                 d_lo = 0; \
2912                         } \
2913                 } else { \
2914                         /* m_lo >= s_lo */ \
2915                         if (m_hi < s_hi) { \
2916                                 d_hi = 0; \
2917                                 d_lo = 0; \
2918                         } else { \
2919                                 /* m_hi >= s_hi */ \
2920                                 d_hi = m_hi - s_hi; \
2921                                 d_lo = m_lo - s_lo; \
2922                         } \
2923                 } \
2924         } while (0)
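/*
 * The result saturates at {0, 0} when the minuend is smaller (e.g.
 * a counter that was reset) instead of wrapping.  Illustrative
 * example: minuend {1, 5} minus subtrahend {0, 9} borrows from the
 * high word and yields {0, 0xfffffffc}.
 */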
2925
2926 #define UPDATE_STAT64(s, t) \
2927         do { \
2928                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2929                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2930                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2931                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2932                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2933                        pstats->mac_stx[1].t##_lo, diff.lo); \
2934         } while (0)
2935
2936 #define UPDATE_STAT64_NIG(s, t) \
2937         do { \
2938                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2939                         diff.lo, new->s##_lo, old->s##_lo); \
2940                 ADD_64(estats->t##_hi, diff.hi, \
2941                        estats->t##_lo, diff.lo); \
2942         } while (0)
2943
2944 /* sum[hi:lo] += add */
2945 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2946         do { \
2947                 s_lo += a; \
2948                 s_hi += (s_lo < a) ? 1 : 0; \
2949         } while (0)
2950
2951 #define UPDATE_EXTEND_STAT(s) \
2952         do { \
2953                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2954                               pstats->mac_stx[1].s##_lo, \
2955                               new->s); \
2956         } while (0)
2957
2958 #define UPDATE_EXTEND_TSTAT(s, t) \
2959         do { \
2960                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2961                 old_tclient->s = le32_to_cpu(tclient->s); \
2962                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2963         } while (0)
2964
2965 #define UPDATE_EXTEND_XSTAT(s, t) \
2966         do { \
2967                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2968                 old_xclient->s = le32_to_cpu(xclient->s); \
2969                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2970         } while (0)
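/*
 * The storms export free-running 32-bit little-endian counters; the
 * UPDATE_EXTEND_* macros above take the delta against the previous
 * snapshot and fold it into a 64-bit host accumulator, which stays
 * exact as long as a counter wraps at most once between updates.
 */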
2971
2972 /*
2973  * General service functions
2974  */
2975
2976 static inline long bnx2x_hilo(u32 *hiref)
2977 {
2978         u32 lo = *(hiref + 1);
2979 #if (BITS_PER_LONG == 64)
2980         u32 hi = *hiref;
2981
2982         return HILO_U64(hi, lo);
2983 #else
2984         return lo;
2985 #endif
2986 }
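/* Note: on 32-bit kernels only the low 32 bits are returned - the
 * struct net_device_stats fields this feeds are unsigned long, so
 * the high word could not be represented there anyway.
 */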
2987
2988 /*
2989  * Init service functions
2990  */
2991
2992 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2993 {
2994         if (!bp->stats_pending) {
2995                 struct eth_query_ramrod_data ramrod_data = {0};
2996                 int rc;
2997
2998                 ramrod_data.drv_counter = bp->stats_counter++;
2999                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3000                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3001
3002                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3003                                    ((u32 *)&ramrod_data)[1],
3004                                    ((u32 *)&ramrod_data)[0], 0);
3005                 if (rc == 0) {
3006                         /* stats ramrod has its own slot on the spq */
3007                         bp->spq_left++;
3008                         bp->stats_pending = 1;
3009                 }
3010         }
3011 }
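/*
 * drv_counter is echoed back by the storms in their per-client stats;
 * bnx2x_storm_stats_update() treats a snapshot as fresh only when the
 * echoed counter + 1 equals the current bp->stats_counter.
 */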
3012
3013 static void bnx2x_stats_init(struct bnx2x *bp)
3014 {
3015         int port = BP_PORT(bp);
3016
3017         bp->executer_idx = 0;
3018         bp->stats_counter = 0;
3019
3020         /* port stats */
3021         if (!BP_NOMCP(bp))
3022                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3023         else
3024                 bp->port.port_stx = 0;
3025         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3026
3027         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3028         bp->port.old_nig_stats.brb_discard =
3029                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3030         bp->port.old_nig_stats.brb_truncate =
3031                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3032         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3033                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3034         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3035                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3036
3037         /* function stats */
3038         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3039         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3040         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3041         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3042
3043         bp->stats_state = STATS_STATE_DISABLED;
3044         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3045                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3046 }
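/*
 * old_nig_stats is primed with the current hardware values so that
 * bnx2x_hw_stats_update() later accumulates only the delta from this
 * point on, not whatever the free-running NIG counters already hold.
 */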
3047
3048 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3049 {
3050         struct dmae_command *dmae = &bp->stats_dmae;
3051         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3052
3053         *stats_comp = DMAE_COMP_VAL;
3054
3055         /* loader */
3056         if (bp->executer_idx) {
3057                 int loader_idx = PMF_DMAE_C(bp);
3058
3059                 memset(dmae, 0, sizeof(struct dmae_command));
3060
3061                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3062                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3063                                 DMAE_CMD_DST_RESET |
3064 #ifdef __BIG_ENDIAN
3065                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3066 #else
3067                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3068 #endif
3069                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3070                                                DMAE_CMD_PORT_0) |
3071                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3072                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3073                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3074                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3075                                      sizeof(struct dmae_command) *
3076                                      (loader_idx + 1)) >> 2;
3077                 dmae->dst_addr_hi = 0;
3078                 dmae->len = sizeof(struct dmae_command) >> 2;
3079                 if (CHIP_IS_E1(bp))
3080                         dmae->len--;
3081                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3082                 dmae->comp_addr_hi = 0;
3083                 dmae->comp_val = 1;
3084
3085                 *stats_comp = 0;
3086                 bnx2x_post_dmae(bp, dmae, loader_idx);
3087
3088         } else if (bp->func_stx) {
3089                 *stats_comp = 0;
3090                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3091         }
3092 }
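/*
 * When a chain was staged (executer_idx != 0), the "loader" command
 * posted on the PMF channel copies staged commands from host memory
 * into DMAE command memory; each completion value lands in a
 * dmae_reg_go_c[] GO register and kicks the next transfer, so the
 * chain runs with no further CPU writes.  Only the last staged
 * command signals the CPU by writing DMAE_COMP_VAL to stats_comp.
 */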
3093
3094 static int bnx2x_stats_comp(struct bnx2x *bp)
3095 {
3096         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3097         int cnt = 10;
3098
3099         might_sleep();
3100         while (*stats_comp != DMAE_COMP_VAL) {
3101                 if (!cnt) {
3102                         BNX2X_ERR("timeout waiting for stats to finish\n");
3103                         break;
3104                 }
3105                 cnt--;
3106                 msleep(1);
3107         }
3108         return 1;
3109 }
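/* Note: the loop above polls for at most ~10ms (10 x msleep(1)) and
 * returns 1 even on timeout - completion is treated as best effort
 * by the callers.
 */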
3110
3111 /*
3112  * Statistics service functions
3113  */
3114
3115 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3116 {
3117         struct dmae_command *dmae;
3118         u32 opcode;
3119         int loader_idx = PMF_DMAE_C(bp);
3120         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3121
3122         /* sanity */
3123         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3124                 BNX2X_ERR("BUG!\n");
3125                 return;
3126         }
3127
3128         bp->executer_idx = 0;
3129
3130         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3131                   DMAE_CMD_C_ENABLE |
3132                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3133 #ifdef __BIG_ENDIAN
3134                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3135 #else
3136                   DMAE_CMD_ENDIANITY_DW_SWAP |
3137 #endif
3138                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3139                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3140
3141         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3142         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3143         dmae->src_addr_lo = bp->port.port_stx >> 2;
3144         dmae->src_addr_hi = 0;
3145         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3146         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3147         dmae->len = DMAE_LEN32_RD_MAX;
3148         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3149         dmae->comp_addr_hi = 0;
3150         dmae->comp_val = 1;
3151
3152         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3153         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3154         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3155         dmae->src_addr_hi = 0;
3156         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3157                                    DMAE_LEN32_RD_MAX * 4);
3158         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3159                                    DMAE_LEN32_RD_MAX * 4);
3160         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3161         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3162         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3163         dmae->comp_val = DMAE_COMP_VAL;
3164
3165         *stats_comp = 0;
3166         bnx2x_hw_stats_post(bp);
3167         bnx2x_stats_comp(bp);
3168 }
3169
3170 static void bnx2x_port_stats_init(struct bnx2x *bp)
3171 {
3172         struct dmae_command *dmae;
3173         int port = BP_PORT(bp);
3174         int vn = BP_E1HVN(bp);
3175         u32 opcode;
3176         int loader_idx = PMF_DMAE_C(bp);
3177         u32 mac_addr;
3178         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3179
3180         /* sanity */
3181         if (!bp->link_vars.link_up || !bp->port.pmf) {
3182                 BNX2X_ERR("BUG!\n");
3183                 return;
3184         }
3185
3186         bp->executer_idx = 0;
3187
3188         /* MCP */
3189         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3190                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3191                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3192 #ifdef __BIG_ENDIAN
3193                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3194 #else
3195                   DMAE_CMD_ENDIANITY_DW_SWAP |
3196 #endif
3197                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3198                   (vn << DMAE_CMD_E1HVN_SHIFT));
3199
3200         if (bp->port.port_stx) {
3201
3202                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3203                 dmae->opcode = opcode;
3204                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3205                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3206                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3207                 dmae->dst_addr_hi = 0;
3208                 dmae->len = sizeof(struct host_port_stats) >> 2;
3209                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3210                 dmae->comp_addr_hi = 0;
3211                 dmae->comp_val = 1;
3212         }
3213
3214         if (bp->func_stx) {
3215
3216                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3217                 dmae->opcode = opcode;
3218                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3219                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3220                 dmae->dst_addr_lo = bp->func_stx >> 2;
3221                 dmae->dst_addr_hi = 0;
3222                 dmae->len = sizeof(struct host_func_stats) >> 2;
3223                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3224                 dmae->comp_addr_hi = 0;
3225                 dmae->comp_val = 1;
3226         }
3227
3228         /* MAC */
3229         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3230                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3231                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3232 #ifdef __BIG_ENDIAN
3233                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3234 #else
3235                   DMAE_CMD_ENDIANITY_DW_SWAP |
3236 #endif
3237                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3238                   (vn << DMAE_CMD_E1HVN_SHIFT));
3239
3240         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3241
3242                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3243                                    NIG_REG_INGRESS_BMAC0_MEM);
3244
3245                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3246                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3247                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3248                 dmae->opcode = opcode;
3249                 dmae->src_addr_lo = (mac_addr +
3250                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3251                 dmae->src_addr_hi = 0;
3252                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3253                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3254                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3255                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3256                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3257                 dmae->comp_addr_hi = 0;
3258                 dmae->comp_val = 1;
3259
3260                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3261                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3262                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3263                 dmae->opcode = opcode;
3264                 dmae->src_addr_lo = (mac_addr +
3265                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3266                 dmae->src_addr_hi = 0;
3267                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3268                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3269                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3270                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3271                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3272                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3273                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3274                 dmae->comp_addr_hi = 0;
3275                 dmae->comp_val = 1;
3276
3277         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3278
3279                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3280
3281                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3282                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3283                 dmae->opcode = opcode;
3284                 dmae->src_addr_lo = (mac_addr +
3285                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3286                 dmae->src_addr_hi = 0;
3287                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3288                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3289                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3290                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3291                 dmae->comp_addr_hi = 0;
3292                 dmae->comp_val = 1;
3293
3294                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3295                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3296                 dmae->opcode = opcode;
3297                 dmae->src_addr_lo = (mac_addr +
3298                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3299                 dmae->src_addr_hi = 0;
3300                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3301                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3302                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3303                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3304                 dmae->len = 1;
3305                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3306                 dmae->comp_addr_hi = 0;
3307                 dmae->comp_val = 1;
3308
3309                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3310                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3311                 dmae->opcode = opcode;
3312                 dmae->src_addr_lo = (mac_addr +
3313                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3314                 dmae->src_addr_hi = 0;
3315                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3316                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3317                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3318                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3319                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3320                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3321                 dmae->comp_addr_hi = 0;
3322                 dmae->comp_val = 1;
3323         }
3324
3325         /* NIG */
3326         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3327         dmae->opcode = opcode;
3328         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3329                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3330         dmae->src_addr_hi = 0;
3331         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3332         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3333         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3334         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3335         dmae->comp_addr_hi = 0;
3336         dmae->comp_val = 1;
3337
3338         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339         dmae->opcode = opcode;
3340         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3341                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3342         dmae->src_addr_hi = 0;
3343         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3344                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3345         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3346                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3347         dmae->len = (2*sizeof(u32)) >> 2;
3348         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349         dmae->comp_addr_hi = 0;
3350         dmae->comp_val = 1;
3351
3352         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3354                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3355                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3356 #ifdef __BIG_ENDIAN
3357                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3358 #else
3359                         DMAE_CMD_ENDIANITY_DW_SWAP |
3360 #endif
3361                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3362                         (vn << DMAE_CMD_E1HVN_SHIFT));
3363         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3364                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3365         dmae->src_addr_hi = 0;
3366         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3367                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3368         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3369                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3370         dmae->len = (2*sizeof(u32)) >> 2;
3371         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3372         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3373         dmae->comp_val = DMAE_COMP_VAL;
3374
3375         *stats_comp = 0;
3376 }
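/*
 * Everything above only stages DMAE commands: host copies of the port
 * and function stats go out to shared memory, and fresh (B/E)MAC and
 * NIG counters come back into the slowpath buffers.  The staged chain
 * is actually executed by the next bnx2x_hw_stats_post() call.
 */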
3377
3378 static void bnx2x_func_stats_init(struct bnx2x *bp)
3379 {
3380         struct dmae_command *dmae = &bp->stats_dmae;
3381         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3382
3383         /* sanity */
3384         if (!bp->func_stx) {
3385                 BNX2X_ERR("BUG!\n");
3386                 return;
3387         }
3388
3389         bp->executer_idx = 0;
3390         memset(dmae, 0, sizeof(struct dmae_command));
3391
3392         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3393                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3394                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3395 #ifdef __BIG_ENDIAN
3396                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3397 #else
3398                         DMAE_CMD_ENDIANITY_DW_SWAP |
3399 #endif
3400                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3401                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3402         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3403         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3404         dmae->dst_addr_lo = bp->func_stx >> 2;
3405         dmae->dst_addr_hi = 0;
3406         dmae->len = sizeof(struct host_func_stats) >> 2;
3407         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3408         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3409         dmae->comp_val = DMAE_COMP_VAL;
3410
3411         *stats_comp = 0;
3412 }
3413
3414 static void bnx2x_stats_start(struct bnx2x *bp)
3415 {
3416         if (bp->port.pmf)
3417                 bnx2x_port_stats_init(bp);
3418
3419         else if (bp->func_stx)
3420                 bnx2x_func_stats_init(bp);
3421
3422         bnx2x_hw_stats_post(bp);
3423         bnx2x_storm_stats_post(bp);
3424 }
3425
3426 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3427 {
3428         bnx2x_stats_comp(bp);
3429         bnx2x_stats_pmf_update(bp);
3430         bnx2x_stats_start(bp);
3431 }
3432
3433 static void bnx2x_stats_restart(struct bnx2x *bp)
3434 {
3435         bnx2x_stats_comp(bp);
3436         bnx2x_stats_start(bp);
3437 }
3438
3439 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3440 {
3441         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3442         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3443         struct regpair diff;
3444
3445         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3446         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3447         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3448         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3449         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3450         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3451         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3452         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3453         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3454         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3455         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3456         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3457         UPDATE_STAT64(tx_stat_gt127,
3458                                 tx_stat_etherstatspkts65octetsto127octets);
3459         UPDATE_STAT64(tx_stat_gt255,
3460                                 tx_stat_etherstatspkts128octetsto255octets);
3461         UPDATE_STAT64(tx_stat_gt511,
3462                                 tx_stat_etherstatspkts256octetsto511octets);
3463         UPDATE_STAT64(tx_stat_gt1023,
3464                                 tx_stat_etherstatspkts512octetsto1023octets);
3465         UPDATE_STAT64(tx_stat_gt1518,
3466                                 tx_stat_etherstatspkts1024octetsto1522octets);
3467         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3468         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3469         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3470         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3471         UPDATE_STAT64(tx_stat_gterr,
3472                                 tx_stat_dot3statsinternalmactransmiterrors);
3473         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3474 }
3475
3476 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3477 {
3478         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3479         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3480
3481         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3482         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3483         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3484         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3485         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3486         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3487         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3488         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3489         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3490         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3491         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3492         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3493         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3494         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3495         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3496         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3497         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3498         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3499         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3500         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3501         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3502         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3503         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3504         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3505         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3506         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3507         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3508         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3509         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3510         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3511         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3512 }
3513
3514 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3515 {
3516         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3517         struct nig_stats *old = &(bp->port.old_nig_stats);
3518         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3519         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3520         struct regpair diff;
3521
3522         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3523                 bnx2x_bmac_stats_update(bp);
3524
3525         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3526                 bnx2x_emac_stats_update(bp);
3527
3528         else { /* unreached */
3529                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3530                 return -1;
3531         }
3532
3533         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3534                       new->brb_discard - old->brb_discard);
3535         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3536                       new->brb_truncate - old->brb_truncate);
3537
3538         UPDATE_STAT64_NIG(egress_mac_pkt0,
3539                                         etherstatspkts1024octetsto1522octets);
3540         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3541
3542         memcpy(old, new, sizeof(struct nig_stats));
3543
3544         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3545                sizeof(struct mac_stx));
3546         estats->brb_drop_hi = pstats->brb_drop_hi;
3547         estats->brb_drop_lo = pstats->brb_drop_lo;
3548
3549         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3550
3551         return 0;
3552 }
3553
3554 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3555 {
3556         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3557         int cl_id = BP_CL_ID(bp);
3558         struct tstorm_per_port_stats *tport =
3559                                 &stats->tstorm_common.port_statistics;
3560         struct tstorm_per_client_stats *tclient =
3561                         &stats->tstorm_common.client_statistics[cl_id];
3562         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3563         struct xstorm_per_client_stats *xclient =
3564                         &stats->xstorm_common.client_statistics[cl_id];
3565         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3566         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3567         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3568         u32 diff;
3569
3570         /* are storm stats valid? */
3571         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3572                                                         bp->stats_counter) {
3573                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3574                    "  tstorm counter (%d) != stats_counter (%d)\n",
3575                    tclient->stats_counter, bp->stats_counter);
3576                 return -1;
3577         }
3578         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3579                                                         bp->stats_counter) {
3580                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3581                    "  xstorm counter (%d) != stats_counter (%d)\n",
3582                    xclient->stats_counter, bp->stats_counter);
3583                 return -2;
3584         }
3585
3586         fstats->total_bytes_received_hi =
3587         fstats->valid_bytes_received_hi =
3588                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3589         fstats->total_bytes_received_lo =
3590         fstats->valid_bytes_received_lo =
3591                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3592
3593         estats->error_bytes_received_hi =
3594                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3595         estats->error_bytes_received_lo =
3596                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3597         ADD_64(estats->error_bytes_received_hi,
3598                estats->rx_stat_ifhcinbadoctets_hi,
3599                estats->error_bytes_received_lo,
3600                estats->rx_stat_ifhcinbadoctets_lo);
3601
3602         ADD_64(fstats->total_bytes_received_hi,
3603                estats->error_bytes_received_hi,
3604                fstats->total_bytes_received_lo,
3605                estats->error_bytes_received_lo);
3606
3607         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3608         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3609                                 total_multicast_packets_received);
3610         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3611                                 total_broadcast_packets_received);
3612
3613         fstats->total_bytes_transmitted_hi =
3614                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3615         fstats->total_bytes_transmitted_lo =
3616                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3617
3618         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3619                                 total_unicast_packets_transmitted);
3620         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3621                                 total_multicast_packets_transmitted);
3622         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3623                                 total_broadcast_packets_transmitted);
3624
3625         memcpy(estats, &(fstats->total_bytes_received_hi),
3626                sizeof(struct host_func_stats) - 2*sizeof(u32));
3627
3628         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3629         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3630         estats->brb_truncate_discard =
3631                                 le32_to_cpu(tport->brb_truncate_discard);
3632         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3633
3634         old_tclient->rcv_unicast_bytes.hi =
3635                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3636         old_tclient->rcv_unicast_bytes.lo =
3637                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3638         old_tclient->rcv_broadcast_bytes.hi =
3639                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3640         old_tclient->rcv_broadcast_bytes.lo =
3641                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3642         old_tclient->rcv_multicast_bytes.hi =
3643                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3644         old_tclient->rcv_multicast_bytes.lo =
3645                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3646         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3647
3648         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3649         old_tclient->packets_too_big_discard =
3650                                 le32_to_cpu(tclient->packets_too_big_discard);
3651         estats->no_buff_discard =
3652         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3653         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3654
3655         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3656         old_xclient->unicast_bytes_sent.hi =
3657                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3658         old_xclient->unicast_bytes_sent.lo =
3659                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3660         old_xclient->multicast_bytes_sent.hi =
3661                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3662         old_xclient->multicast_bytes_sent.lo =
3663                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3664         old_xclient->broadcast_bytes_sent.hi =
3665                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3666         old_xclient->broadcast_bytes_sent.lo =
3667                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3668
3669         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3670
3671         return 0;
3672 }
3673
3674 static void bnx2x_net_stats_update(struct bnx2x *bp)
3675 {
3676         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3677         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3678         struct net_device_stats *nstats = &bp->dev->stats;
3679
3680         nstats->rx_packets =
3681                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3682                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3683                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3684
3685         nstats->tx_packets =
3686                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3687                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3688                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3689
3690         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3691
3692         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3693
3694         nstats->rx_dropped = old_tclient->checksum_discard +
3695                              estats->mac_discard;
3696         nstats->tx_dropped = 0;
3697
3698         nstats->multicast =
3699                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3700
3701         nstats->collisions =
3702                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3703                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3704                         estats->tx_stat_dot3statslatecollisions_lo +
3705                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3706
3707         estats->jabber_packets_received =
3708                                 old_tclient->packets_too_big_discard +
3709                                 estats->rx_stat_dot3statsframestoolong_lo;
3710
3711         nstats->rx_length_errors =
3712                                 estats->rx_stat_etherstatsundersizepkts_lo +
3713                                 estats->jabber_packets_received;
3714         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3715         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3716         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3717         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3718         nstats->rx_missed_errors = estats->xxoverflow_discard;
3719
3720         nstats->rx_errors = nstats->rx_length_errors +
3721                             nstats->rx_over_errors +
3722                             nstats->rx_crc_errors +
3723                             nstats->rx_frame_errors +
3724                             nstats->rx_fifo_errors +
3725                             nstats->rx_missed_errors;
3726
3727         nstats->tx_aborted_errors =
3728                         estats->tx_stat_dot3statslatecollisions_lo +
3729                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3730         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3731         nstats->tx_fifo_errors = 0;
3732         nstats->tx_heartbeat_errors = 0;
3733         nstats->tx_window_errors = 0;
3734
3735         nstats->tx_errors = nstats->tx_aborted_errors +
3736                             nstats->tx_carrier_errors;
3737 }
3738
3739 static void bnx2x_stats_update(struct bnx2x *bp)
3740 {
3741         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3742         int update = 0;
3743
3744         if (*stats_comp != DMAE_COMP_VAL)
3745                 return;
3746
3747         if (bp->port.pmf)
3748                 update = (bnx2x_hw_stats_update(bp) == 0);
3749
3750         update |= (bnx2x_storm_stats_update(bp) == 0);
3751
3752         if (update)
3753                 bnx2x_net_stats_update(bp);
3754
3755         else {
3756                 if (bp->stats_pending) {
3757                         bp->stats_pending++;
3758                         if (bp->stats_pending == 3) {
3759                                 BNX2X_ERR("stats not updated for 3 consecutive polls\n");
3760                                 bnx2x_panic();
3761                                 return;
3762                         }
3763                 }
3764         }
3765
3766         if (bp->msglevel & NETIF_MSG_TIMER) {
3767                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3768                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3769                 struct net_device_stats *nstats = &bp->dev->stats;
3770                 int i;
3771
3772                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3773                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3774                                   "  tx pkt (%lx)\n",
3775                        bnx2x_tx_avail(bp->fp),
3776                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3777                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3778                                   "  rx pkt (%lx)\n",
3779                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3780                              bp->fp->rx_comp_cons),
3781                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3782                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3783                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3784                        estats->driver_xoff, estats->brb_drop_lo);
3785                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3786                         "packets_too_big_discard %u  no_buff_discard %u  "
3787                         "mac_discard %u  mac_filter_discard %u  "
3788                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3789                         "ttl0_discard %u\n",
3790                        old_tclient->checksum_discard,
3791                        old_tclient->packets_too_big_discard,
3792                        old_tclient->no_buff_discard, estats->mac_discard,
3793                        estats->mac_filter_discard, estats->xxoverflow_discard,
3794                        estats->brb_truncate_discard,
3795                        old_tclient->ttl0_discard);
3796
3797                 for_each_queue(bp, i) {
3798                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3799                                bnx2x_fp(bp, i, tx_pkt),
3800                                bnx2x_fp(bp, i, rx_pkt),
3801                                bnx2x_fp(bp, i, rx_calls));
3802                 }
3803         }
3804
3805         bnx2x_hw_stats_post(bp);
3806         bnx2x_storm_stats_post(bp);
3807 }
3808
3809 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3810 {
3811         struct dmae_command *dmae;
3812         u32 opcode;
3813         int loader_idx = PMF_DMAE_C(bp);
3814         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3815
3816         bp->executer_idx = 0;
3817
3818         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3819                   DMAE_CMD_C_ENABLE |
3820                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3821 #ifdef __BIG_ENDIAN
3822                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3823 #else
3824                   DMAE_CMD_ENDIANITY_DW_SWAP |
3825 #endif
3826                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3827                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3828
3829         if (bp->port.port_stx) {
3830
3831                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3832                 if (bp->func_stx)
3833                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3834                 else
3835                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3836                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3837                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3838                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3839                 dmae->dst_addr_hi = 0;
3840                 dmae->len = sizeof(struct host_port_stats) >> 2;
3841                 if (bp->func_stx) {
3842                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3843                         dmae->comp_addr_hi = 0;
3844                         dmae->comp_val = 1;
3845                 } else {
3846                         dmae->comp_addr_lo =
3847                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3848                         dmae->comp_addr_hi =
3849                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3850                         dmae->comp_val = DMAE_COMP_VAL;
3851
3852                         *stats_comp = 0;
3853                 }
3854         }
3855
3856         if (bp->func_stx) {
3857
3858                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3859                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3860                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3861                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3862                 dmae->dst_addr_lo = bp->func_stx >> 2;
3863                 dmae->dst_addr_hi = 0;
3864                 dmae->len = sizeof(struct host_func_stats) >> 2;
3865                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3866                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3867                 dmae->comp_val = DMAE_COMP_VAL;
3868
3869                 *stats_comp = 0;
3870         }
3871 }
3872
3873 static void bnx2x_stats_stop(struct bnx2x *bp)
3874 {
3875         int update = 0;
3876
3877         bnx2x_stats_comp(bp);
3878
3879         if (bp->port.pmf)
3880                 update = (bnx2x_hw_stats_update(bp) == 0);
3881
3882         update |= (bnx2x_storm_stats_update(bp) == 0);
3883
3884         if (update) {
3885                 bnx2x_net_stats_update(bp);
3886
3887                 if (bp->port.pmf)
3888                         bnx2x_port_stats_stop(bp);
3889
3890                 bnx2x_hw_stats_post(bp);
3891                 bnx2x_stats_comp(bp);
3892         }
3893 }
3894
3895 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3896 {
3897 }
3898
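/*
 * Statistics state machine, indexed [current state][event]: each cell
 * gives the action to run and the next state.  Illustrative example:
 * a LINK_UP event in STATS_STATE_DISABLED runs bnx2x_stats_start()
 * and moves the machine to STATS_STATE_ENABLED.
 */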
3899 static const struct {
3900         void (*action)(struct bnx2x *bp);
3901         enum bnx2x_stats_state next_state;
3902 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3903 /* state        event   */
3904 {
3905 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3906 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3907 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3908 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3909 },
3910 {
3911 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3912 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3913 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3914 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3915 }
3916 };
3917
3918 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3919 {
3920         enum bnx2x_stats_state state = bp->stats_state;
3921
3922         bnx2x_stats_stm[state][event].action(bp);
3923         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3924
3925         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3926                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3927                    state, event, bp->stats_state);
3928 }
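/*
 * Typical use: bnx2x_timer() below feeds STATS_EVENT_UPDATE into the
 * state machine once per interval, while link and PMF changes
 * elsewhere in the driver post the LINK_UP, PMF and STOP events.
 */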
3929
3930 static void bnx2x_timer(unsigned long data)
3931 {
3932         struct bnx2x *bp = (struct bnx2x *) data;
3933
3934         if (!netif_running(bp->dev))
3935                 return;
3936
3937         if (atomic_read(&bp->intr_sem) != 0)
3938                 goto timer_restart;
3939
3940         if (poll) {
3941                 struct bnx2x_fastpath *fp = &bp->fp[0];
3942                 int rc;
3943
3944                 bnx2x_tx_int(fp, 1000);
3945                 rc = bnx2x_rx_int(fp, 1000);
3946         }
3947
3948         if (!BP_NOMCP(bp)) {
3949                 int func = BP_FUNC(bp);
3950                 u32 drv_pulse;
3951                 u32 mcp_pulse;
3952
3953                 ++bp->fw_drv_pulse_wr_seq;
3954                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3955                 /* TBD - add SYSTEM_TIME */
3956                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3957                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3958
3959                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3960                              MCP_PULSE_SEQ_MASK);
3961                 /* The delta between driver pulse and mcp response
3962                  * should be 1 (before mcp response) or 0 (after mcp response)
3963                  */
3964                 if ((drv_pulse != mcp_pulse) &&
3965                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3966                         /* someone lost a heartbeat... */
3967                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3968                                   drv_pulse, mcp_pulse);
3969                 }
3970         }
3971
3972         if ((bp->state == BNX2X_STATE_OPEN) ||
3973             (bp->state == BNX2X_STATE_DISABLED))
3974                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3975
3976 timer_restart:
3977         mod_timer(&bp->timer, jiffies + bp->current_interval);
3978 }
3979
3980 /* end of Statistics */
3981
3982 /* nic init */
3983
3984 /*
3985  * nic init service functions
3986  */
3987
3988 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3989 {
3990         int port = BP_PORT(bp);
3991
3992         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3993                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3994                         sizeof(struct ustorm_status_block)/4);
3995         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3996                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3997                         sizeof(struct cstorm_status_block)/4);
3998 }
3999
4000 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4001                           dma_addr_t mapping, int sb_id)
4002 {
4003         int port = BP_PORT(bp);
4004         int func = BP_FUNC(bp);
4005         int index;
4006         u64 section;
4007
4008         /* USTORM */
4009         section = ((u64)mapping) + offsetof(struct host_status_block,
4010                                             u_status_block);
4011         sb->u_status_block.status_block_id = sb_id;
4012
4013         REG_WR(bp, BAR_USTRORM_INTMEM +
4014                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4015         REG_WR(bp, BAR_USTRORM_INTMEM +
4016                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4017                U64_HI(section));
4018         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4019                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4020
4021         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4022                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4023                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4024
4025         /* CSTORM */
4026         section = ((u64)mapping) + offsetof(struct host_status_block,
4027                                             c_status_block);
4028         sb->c_status_block.status_block_id = sb_id;
4029
4030         REG_WR(bp, BAR_CSTRORM_INTMEM +
4031                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4032         REG_WR(bp, BAR_CSTRORM_INTMEM +
4033                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4034                U64_HI(section));
4035         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4036                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4037
4038         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4039                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4040                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4041
4042         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4043 }
4044
4045 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4046 {
4047         int func = BP_FUNC(bp);
4048
4049         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4050                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4051                         sizeof(struct ustorm_def_status_block)/4);
4052         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4053                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4054                         sizeof(struct cstorm_def_status_block)/4);
4055         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4056                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4057                         sizeof(struct xstorm_def_status_block)/4);
4058         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4059                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4060                         sizeof(struct tstorm_def_status_block)/4);
4061 }
4062
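/* The default status block handles slowpath events: latch the AEU
 * attention group signals for the attention section, then wire the
 * per-storm (U/C/T/X) index sections to their host addresses.
 */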
4063 static void bnx2x_init_def_sb(struct bnx2x *bp,
4064                               struct host_def_status_block *def_sb,
4065                               dma_addr_t mapping, int sb_id)
4066 {
4067         int port = BP_PORT(bp);
4068         int func = BP_FUNC(bp);
4069         int index, val, reg_offset;
4070         u64 section;
4071
4072         /* ATTN */
4073         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4074                                             atten_status_block);
4075         def_sb->atten_status_block.status_block_id = sb_id;
4076
4077         bp->attn_state = 0;
4078
4079         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4080                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4081
4082         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4083                 bp->attn_group[index].sig[0] = REG_RD(bp,
4084                                                      reg_offset + 0x10*index);
4085                 bp->attn_group[index].sig[1] = REG_RD(bp,
4086                                                reg_offset + 0x4 + 0x10*index);
4087                 bp->attn_group[index].sig[2] = REG_RD(bp,
4088                                                reg_offset + 0x8 + 0x10*index);
4089                 bp->attn_group[index].sig[3] = REG_RD(bp,
4090                                                reg_offset + 0xc + 0x10*index);
4091         }
4092
4093         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4094                              HC_REG_ATTN_MSG0_ADDR_L);
4095
4096         REG_WR(bp, reg_offset, U64_LO(section));
4097         REG_WR(bp, reg_offset + 4, U64_HI(section));
4098
4099         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4100
4101         val = REG_RD(bp, reg_offset);
4102         val |= sb_id;
4103         REG_WR(bp, reg_offset, val);
4104
4105         /* USTORM */
4106         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4107                                             u_def_status_block);
4108         def_sb->u_def_status_block.status_block_id = sb_id;
4109
4110         REG_WR(bp, BAR_USTRORM_INTMEM +
4111                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4112         REG_WR(bp, BAR_USTRORM_INTMEM +
4113                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4114                U64_HI(section));
4115         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4116                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4117
4118         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4119                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4120                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4121
4122         /* CSTORM */
4123         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4124                                             c_def_status_block);
4125         def_sb->c_def_status_block.status_block_id = sb_id;
4126
4127         REG_WR(bp, BAR_CSTRORM_INTMEM +
4128                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4129         REG_WR(bp, BAR_CSTRORM_INTMEM +
4130                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4131                U64_HI(section));
4132         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4133                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4134
4135         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4136                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4137                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4138
4139         /* TSTORM */
4140         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4141                                             t_def_status_block);
4142         def_sb->t_def_status_block.status_block_id = sb_id;
4143
4144         REG_WR(bp, BAR_TSTRORM_INTMEM +
4145                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4146         REG_WR(bp, BAR_TSTRORM_INTMEM +
4147                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4148                U64_HI(section));
4149         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4150                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4151
4152         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4153                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4154                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4155
4156         /* XSTORM */
4157         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4158                                             x_def_status_block);
4159         def_sb->x_def_status_block.status_block_id = sb_id;
4160
4161         REG_WR(bp, BAR_XSTRORM_INTMEM +
4162                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4163         REG_WR(bp, BAR_XSTRORM_INTMEM +
4164                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4165                U64_HI(section));
4166         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4167                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4168
4169         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4170                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4171                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4172
4173         bp->stats_pending = 0;
4174         bp->set_mac_pending = 0;
4175
4176         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4177 }
4178
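/* Program the host coalescing timeouts for the Rx and Tx completion
 * indices; a tick value of 0 disables coalescing on that index.
 */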
4179 static void bnx2x_update_coalesce(struct bnx2x *bp)
4180 {
4181         int port = BP_PORT(bp);
4182         int i;
4183
4184         for_each_queue(bp, i) {
4185                 int sb_id = bp->fp[i].sb_id;
4186
4187                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4188                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4189                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4190                                                     U_SB_ETH_RX_CQ_INDEX),
4191                         bp->rx_ticks/12);
4192                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4193                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4194                                                      U_SB_ETH_RX_CQ_INDEX),
4195                          bp->rx_ticks ? 0 : 1);
4196                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4197                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4198                                                      U_SB_ETH_RX_BD_INDEX),
4199                          bp->rx_ticks ? 0 : 1);
4200
4201                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4202                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4203                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4204                                                     C_SB_ETH_TX_CQ_INDEX),
4205                         bp->tx_ticks/12);
4206                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4207                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4208                                                      C_SB_ETH_TX_CQ_INDEX),
4209                          bp->tx_ticks ? 0 : 1);
4210         }
4211 }
4212
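/* Free the TPA bin skbs; only bins still in the BNX2X_TPA_START state
 * hold a live DMA mapping that must be unmapped first.
 */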
4213 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4214                                        struct bnx2x_fastpath *fp, int last)
4215 {
4216         int i;
4217
4218         for (i = 0; i < last; i++) {
4219                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4220                 struct sk_buff *skb = rx_buf->skb;
4221
4222                 if (skb == NULL) {
4223                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4224                         continue;
4225                 }
4226
4227                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4228                         pci_unmap_single(bp->pdev,
4229                                          pci_unmap_addr(rx_buf, mapping),
4230                                          bp->rx_buf_size,
4231                                          PCI_DMA_FROMDEVICE);
4232
4233                 dev_kfree_skb(skb);
4234                 rx_buf->skb = NULL;
4235         }
4236 }
4237
4238 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4239 {
4240         int func = BP_FUNC(bp);
4241         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4242                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4243         u16 ring_prod, cqe_ring_prod;
4244         int i, j;
4245
4246         bp->rx_buf_size = bp->dev->mtu;
4247         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4248                 BCM_RX_ETH_PAYLOAD_ALIGN;
4249
4250         if (bp->flags & TPA_ENABLE_FLAG) {
4251                 DP(NETIF_MSG_IFUP,
4252                    "rx_buf_size %d  effective_mtu %d\n",
4253                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4254
4255                 for_each_queue(bp, j) {
4256                         struct bnx2x_fastpath *fp = &bp->fp[j];
4257
4258                         for (i = 0; i < max_agg_queues; i++) {
4259                                 fp->tpa_pool[i].skb =
4260                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4261                                 if (!fp->tpa_pool[i].skb) {
4262                                         BNX2X_ERR("Failed to allocate TPA "
4263                                                   "skb pool for queue[%d] - "
4264                                                   "disabling TPA on this "
4265                                                   "queue!\n", j);
4266                                         bnx2x_free_tpa_pool(bp, fp, i);
4267                                         fp->disable_tpa = 1;
4268                                         break;
4269                                 }
4270                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4271                                                    mapping, 0);
4273                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4274                         }
4275                 }
4276         }
4277
4278         for_each_queue(bp, j) {
4279                 struct bnx2x_fastpath *fp = &bp->fp[j];
4280
4281                 fp->rx_bd_cons = 0;
4282                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4283                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4284
4285                 /* "next page" elements initialization */
4286                 /* SGE ring */
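                /* the last entries of each ring page are reserved as a
                 * link holding the 64-bit address of the next page, so
                 * the pages chain into one circular ring */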
4287                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4288                         struct eth_rx_sge *sge;
4289
4290                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4291                         sge->addr_hi =
4292                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4293                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4294                         sge->addr_lo =
4295                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4296                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4297                 }
4298
4299                 bnx2x_init_sge_ring_bit_mask(fp);
4300
4301                 /* RX BD ring */
4302                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4303                         struct eth_rx_bd *rx_bd;
4304
4305                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4306                         rx_bd->addr_hi =
4307                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4308                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4309                         rx_bd->addr_lo =
4310                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4311                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4312                 }
4313
4314                 /* CQ ring */
4315                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4316                         struct eth_rx_cqe_next_page *nextpg;
4317
4318                         nextpg = (struct eth_rx_cqe_next_page *)
4319                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4320                         nextpg->addr_hi =
4321                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4322                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4323                         nextpg->addr_lo =
4324                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4325                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4326                 }
4327
4328                 /* Allocate SGEs and initialize the ring elements */
4329                 for (i = 0, ring_prod = 0;
4330                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4331
4332                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4333                                 BNX2X_ERR("was only able to allocate "
4334                                           "%d rx sges\n", i);
4335                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4336                                 /* Cleanup already allocated elements */
4337                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4338                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4339                                 fp->disable_tpa = 1;
4340                                 ring_prod = 0;
4341                                 break;
4342                         }
4343                         ring_prod = NEXT_SGE_IDX(ring_prod);
4344                 }
4345                 fp->rx_sge_prod = ring_prod;
4346
4347                 /* Allocate BDs and initialize BD ring */
4348                 fp->rx_comp_cons = 0;
4349                 cqe_ring_prod = ring_prod = 0;
4350                 for (i = 0; i < bp->rx_ring_size; i++) {
4351                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4352                                 BNX2X_ERR("was only able to allocate "
4353                                           "%d rx skbs\n", i);
4354                                 bp->eth_stats.rx_skb_alloc_failed++;
4355                                 break;
4356                         }
4357                         ring_prod = NEXT_RX_IDX(ring_prod);
4358                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4359                         WARN_ON(ring_prod <= i);
4360                 }
4361
4362                 fp->rx_bd_prod = ring_prod;
4363                 /* must not have more available CQEs than BDs */
4364                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4365                                        cqe_ring_prod);
4366                 fp->rx_pkt = fp->rx_calls = 0;
4367
4368                 /* Warning!
4369                  * this will generate an interrupt (to the TSTORM);
4370                  * it must only be done after the chip is initialized
4371                  */
4372                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4373                                      fp->rx_sge_prod);
4374                 if (j != 0)
4375                         continue;
4376
4377                 REG_WR(bp, BAR_USTRORM_INTMEM +
4378                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4379                        U64_LO(fp->rx_comp_mapping));
4380                 REG_WR(bp, BAR_USTRORM_INTMEM +
4381                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4382                        U64_HI(fp->rx_comp_mapping));
4383         }
4384 }
4385
4386 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4387 {
4388         int i, j;
4389
4390         for_each_queue(bp, j) {
4391                 struct bnx2x_fastpath *fp = &bp->fp[j];
4392
4393                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4394                         struct eth_tx_bd *tx_bd =
4395                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4396
4397                         tx_bd->addr_hi =
4398                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4399                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4400                         tx_bd->addr_lo =
4401                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4402                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4403                 }
4404
4405                 fp->tx_pkt_prod = 0;
4406                 fp->tx_pkt_cons = 0;
4407                 fp->tx_bd_prod = 0;
4408                 fp->tx_bd_cons = 0;
4409                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4410                 fp->tx_pkt = 0;
4411         }
4412 }
4413
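/* Set up the slowpath queue (SPQ): reset the producer state and hand
 * the ring's base address and initial producer index to the XSTORM.
 */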
4414 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4415 {
4416         int func = BP_FUNC(bp);
4417
4418         spin_lock_init(&bp->spq_lock);
4419
4420         bp->spq_left = MAX_SPQ_PENDING;
4421         bp->spq_prod_idx = 0;
4422         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4423         bp->spq_prod_bd = bp->spq;
4424         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4425
4426         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4427                U64_LO(bp->spq_mapping));
4428         REG_WR(bp,
4429                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4430                U64_HI(bp->spq_mapping));
4431
4432         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4433                bp->spq_prod_idx);
4434 }
4435
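/* Fill the per-connection Ethernet context: Tx BD ring and doorbell
 * addresses for the XSTORM, Rx BD/SGE ring addresses and buffer sizes
 * for the USTORM, plus the CDU validation words for this CID.
 */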
4436 static void bnx2x_init_context(struct bnx2x *bp)
4437 {
4438         int i;
4439
4440         for_each_queue(bp, i) {
4441                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4442                 struct bnx2x_fastpath *fp = &bp->fp[i];
4443                 u8 sb_id = FP_SB_ID(fp);
4444
4445                 context->xstorm_st_context.tx_bd_page_base_hi =
4446                                                 U64_HI(fp->tx_desc_mapping);
4447                 context->xstorm_st_context.tx_bd_page_base_lo =
4448                                                 U64_LO(fp->tx_desc_mapping);
4449                 context->xstorm_st_context.db_data_addr_hi =
4450                                                 U64_HI(fp->tx_prods_mapping);
4451                 context->xstorm_st_context.db_data_addr_lo =
4452                                                 U64_LO(fp->tx_prods_mapping);
4453                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4454                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4455
4456                 context->ustorm_st_context.common.sb_index_numbers =
4457                                                 BNX2X_RX_SB_INDEX_NUM;
4458                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4459                 context->ustorm_st_context.common.status_block_id = sb_id;
4460                 context->ustorm_st_context.common.flags =
4461                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4462                 context->ustorm_st_context.common.mc_alignment_size =
4463                         BCM_RX_ETH_PAYLOAD_ALIGN;
4464                 context->ustorm_st_context.common.bd_buff_size =
4465                                                 bp->rx_buf_size;
4466                 context->ustorm_st_context.common.bd_page_base_hi =
4467                                                 U64_HI(fp->rx_desc_mapping);
4468                 context->ustorm_st_context.common.bd_page_base_lo =
4469                                                 U64_LO(fp->rx_desc_mapping);
4470                 if (!fp->disable_tpa) {
4471                         context->ustorm_st_context.common.flags |=
4472                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4473                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4474                         context->ustorm_st_context.common.sge_buff_size =
4475                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4476                         context->ustorm_st_context.common.sge_page_base_hi =
4477                                                 U64_HI(fp->rx_sge_mapping);
4478                         context->ustorm_st_context.common.sge_page_base_lo =
4479                                                 U64_LO(fp->rx_sge_mapping);
4480                 }
4481
4482                 context->cstorm_st_context.sb_index_number =
4483                                                 C_SB_ETH_TX_CQ_INDEX;
4484                 context->cstorm_st_context.status_block_id = sb_id;
4485
4486                 context->xstorm_ag_context.cdu_reserved =
4487                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4488                                                CDU_REGION_NUMBER_XCM_AG,
4489                                                ETH_CONNECTION_TYPE);
4490                 context->ustorm_ag_context.cdu_usage =
4491                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4492                                                CDU_REGION_NUMBER_UCM_AG,
4493                                                ETH_CONNECTION_TYPE);
4494         }
4495 }
4496
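/* RSS indirection table: spread the entries round-robin across the
 * active queues; only meaningful when multi-queue mode is enabled.
 */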
4497 static void bnx2x_init_ind_table(struct bnx2x *bp)
4498 {
4499         int port = BP_PORT(bp);
4500         int i;
4501
4502         if (!is_multi(bp))
4503                 return;
4504
4505         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4506         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4507                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4508                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4509                         i % bp->num_queues);
4510
4511         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4512 }
4513
4514 static void bnx2x_set_client_config(struct bnx2x *bp)
4515 {
4516         struct tstorm_eth_client_config tstorm_client = {0};
4517         int port = BP_PORT(bp);
4518         int i;
4519
4520         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4521         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4522         tstorm_client.config_flags =
4523                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4524 #ifdef BCM_VLAN
4525         if (bp->rx_mode && bp->vlgrp) {
4526                 tstorm_client.config_flags |=
4527                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4528                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4529         }
4530 #endif
4531
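        /* max_sges_for_packet: the number of pages an MTU-sized frame
         * can span, rounded up to whole SGE entries (each SGE covers
         * PAGES_PER_SGE pages) */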
4532         if (bp->flags & TPA_ENABLE_FLAG) {
4533                 tstorm_client.max_sges_for_packet =
4534                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4535                 tstorm_client.max_sges_for_packet =
4536                         ((tstorm_client.max_sges_for_packet +
4537                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4538                         PAGES_PER_SGE_SHIFT;
4539
4540                 tstorm_client.config_flags |=
4541                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4542         }
4543
4544         for_each_queue(bp, i) {
4545                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4546                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4547                        ((u32 *)&tstorm_client)[0]);
4548                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4549                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4550                        ((u32 *)&tstorm_client)[1]);
4551         }
4552
4553         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4554            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4555 }
4556
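/* Translate the requested rx mode into TSTORM drop/accept-all filter
 * masks for this function and push the client config when Rx is on.
 */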
4557 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4558 {
4559         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4560         int mode = bp->rx_mode;
4561         int mask = (1 << BP_L_ID(bp));
4562         int func = BP_FUNC(bp);
4563         int i;
4564
4565         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4566
4567         switch (mode) {
4568         case BNX2X_RX_MODE_NONE: /* no Rx */
4569                 tstorm_mac_filter.ucast_drop_all = mask;
4570                 tstorm_mac_filter.mcast_drop_all = mask;
4571                 tstorm_mac_filter.bcast_drop_all = mask;
4572                 break;
4573         case BNX2X_RX_MODE_NORMAL:
4574                 tstorm_mac_filter.bcast_accept_all = mask;
4575                 break;
4576         case BNX2X_RX_MODE_ALLMULTI:
4577                 tstorm_mac_filter.mcast_accept_all = mask;
4578                 tstorm_mac_filter.bcast_accept_all = mask;
4579                 break;
4580         case BNX2X_RX_MODE_PROMISC:
4581                 tstorm_mac_filter.ucast_accept_all = mask;
4582                 tstorm_mac_filter.mcast_accept_all = mask;
4583                 tstorm_mac_filter.bcast_accept_all = mask;
4584                 break;
4585         default:
4586                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4587                 break;
4588         }
4589
4590         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4591                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4592                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4593                        ((u32 *)&tstorm_mac_filter)[i]);
4594
4595 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4596                    ((u32 *)&tstorm_mac_filter)[i]); */
4597         }
4598
4599         if (mode != BNX2X_RX_MODE_NONE)
4600                 bnx2x_set_client_config(bp);
4601 }
4602
4603 static void bnx2x_init_internal_common(struct bnx2x *bp)
4604 {
4605         int i;
4606
4607         if (bp->flags & TPA_ENABLE_FLAG) {
4608                 struct tstorm_eth_tpa_exist tpa = {0};
4609
4610                 tpa.tpa_exist = 1;
4611
4612                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4613                        ((u32 *)&tpa)[0]);
4614                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4615                        ((u32 *)&tpa)[1]);
4616         }
4617
4618         /* Zero this manually as its initialization is
4619            currently missing in the initTool */
4620         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4621                 REG_WR(bp, BAR_USTRORM_INTMEM +
4622                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4623 }
4624
4625 static void bnx2x_init_internal_port(struct bnx2x *bp)
4626 {
4627         int port = BP_PORT(bp);
4628
4629         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4630         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4631         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4632         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633 }
4634
4635 static void bnx2x_init_internal_func(struct bnx2x *bp)
4636 {
4637         struct tstorm_eth_function_common_config tstorm_config = {0};
4638         struct stats_indication_flags stats_flags = {0};
4639         int port = BP_PORT(bp);
4640         int func = BP_FUNC(bp);
4641         int i;
4642         u16 max_agg_size;
4643
4644         if (is_multi(bp)) {
4645                 tstorm_config.config_flags = MULTI_FLAGS;
4646                 tstorm_config.rss_result_mask = MULTI_MASK;
4647         }
4648
4649         tstorm_config.leading_client_id = BP_L_ID(bp);
4650
4651         REG_WR(bp, BAR_TSTRORM_INTMEM +
4652                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4653                (*(u32 *)&tstorm_config));
4654
4655         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4656         bnx2x_set_storm_rx_mode(bp);
4657
4658         /* reset xstorm per client statistics */
4659         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4660                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4661                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4662                        i*4, 0);
4663         }
4664         /* reset tstorm per client statistics */
4665         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4666                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4667                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4668                        i*4, 0);
4669         }
4670
4671         /* Init statistics related context */
4672         stats_flags.collect_eth = 1;
4673
4674         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4675                ((u32 *)&stats_flags)[0]);
4676         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4677                ((u32 *)&stats_flags)[1]);
4678
4679         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4680                ((u32 *)&stats_flags)[0]);
4681         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4682                ((u32 *)&stats_flags)[1]);
4683
4684         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4685                ((u32 *)&stats_flags)[0]);
4686         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4687                ((u32 *)&stats_flags)[1]);
4688
4689         REG_WR(bp, BAR_XSTRORM_INTMEM +
4690                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4691                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4692         REG_WR(bp, BAR_XSTRORM_INTMEM +
4693                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4694                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4695
4696         REG_WR(bp, BAR_TSTRORM_INTMEM +
4697                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4698                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4699         REG_WR(bp, BAR_TSTRORM_INTMEM +
4700                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4701                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4702
4703         if (CHIP_IS_E1H(bp)) {
4704                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4705                         IS_E1HMF(bp));
4706                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4707                         IS_E1HMF(bp));
4708                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4709                         IS_E1HMF(bp));
4710                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4711                         IS_E1HMF(bp));
4712
4713                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4714                          bp->e1hov);
4715         }
4716
4717         /* Init CQ ring mapping and aggregation size */
4718         max_agg_size = min((u32)(bp->rx_buf_size +
4719                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4720                            (u32)0xffff);
4721         for_each_queue(bp, i) {
4722                 struct bnx2x_fastpath *fp = &bp->fp[i];
4723
4724                 REG_WR(bp, BAR_USTRORM_INTMEM +
4725                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4726                        U64_LO(fp->rx_comp_mapping));
4727                 REG_WR(bp, BAR_USTRORM_INTMEM +
4728                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4729                        U64_HI(fp->rx_comp_mapping));
4730
4731                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4732                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4733                          max_agg_size);
4734         }
4735 }
4736
4737 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4738 {
4739         switch (load_code) {
4740         case FW_MSG_CODE_DRV_LOAD_COMMON:
4741                 bnx2x_init_internal_common(bp);
4742                 /* no break */
4743
4744         case FW_MSG_CODE_DRV_LOAD_PORT:
4745                 bnx2x_init_internal_port(bp);
4746                 /* no break */
4747
4748         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4749                 bnx2x_init_internal_func(bp);
4750                 break;
4751
4752         default:
4753                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4754                 break;
4755         }
4756 }
4757
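/* Top-level NIC init: bring up the fastpath status blocks, the default
 * SB, all rings and contexts, then the firmware internals, and finally
 * enable interrupts.
 */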
4758 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4759 {
4760         int i;
4761
4762         for_each_queue(bp, i) {
4763                 struct bnx2x_fastpath *fp = &bp->fp[i];
4764
4765                 fp->bp = bp;
4766                 fp->state = BNX2X_FP_STATE_CLOSED;
4767                 fp->index = i;
4768                 fp->cl_id = BP_L_ID(bp) + i;
4769                 fp->sb_id = fp->cl_id;
4770                 DP(NETIF_MSG_IFUP,
4771                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4772                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4773                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4774                               FP_SB_ID(fp));
4775                 bnx2x_update_fpsb_idx(fp);
4776         }
4777
4778         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4779                           DEF_SB_ID);
4780         bnx2x_update_dsb_idx(bp);
4781         bnx2x_update_coalesce(bp);
4782         bnx2x_init_rx_rings(bp);
4783         bnx2x_init_tx_ring(bp);
4784         bnx2x_init_sp_ring(bp);
4785         bnx2x_init_context(bp);
4786         bnx2x_init_internal(bp, load_code);
4787         bnx2x_init_ind_table(bp);
4788         bnx2x_int_enable(bp);
4789 }
4790
4791 /* end of nic init */
4792
4793 /*
4794  * gzip service functions
4795  */
4796
4797 static int bnx2x_gunzip_init(struct bnx2x *bp)
4798 {
4799         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4800                                               &bp->gunzip_mapping);
4801         if (bp->gunzip_buf == NULL)
4802                 goto gunzip_nomem1;
4803
4804         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4805         if (bp->strm == NULL)
4806                 goto gunzip_nomem2;
4807
4808         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4809                                       GFP_KERNEL);
4810         if (bp->strm->workspace == NULL)
4811                 goto gunzip_nomem3;
4812
4813         return 0;
4814
4815 gunzip_nomem3:
4816         kfree(bp->strm);
4817         bp->strm = NULL;
4818
4819 gunzip_nomem2:
4820         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4821                             bp->gunzip_mapping);
4822         bp->gunzip_buf = NULL;
4823
4824 gunzip_nomem1:
4825         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4826                " decompression\n", bp->dev->name);
4827         return -ENOMEM;
4828 }
4829
4830 static void bnx2x_gunzip_end(struct bnx2x *bp)
4831 {
4832         kfree(bp->strm->workspace);
4833
4834         kfree(bp->strm);
4835         bp->strm = NULL;
4836
4837         if (bp->gunzip_buf) {
4838                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4839                                     bp->gunzip_mapping);
4840                 bp->gunzip_buf = NULL;
4841         }
4842 }
4843
4844 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4845 {
4846         int n, rc;
4847
4848         /* check gzip header */
4849         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4850                 return -EINVAL;
4851
4852         n = 10;
4853
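/* a gzip member starts with a 10-byte fixed header; if the FNAME flag
 * is set, a zero-terminated file name follows and must be skipped
 * before the raw deflate stream begins (RFC 1952) */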
4854 #define FNAME                           0x8
4855
4856         if (zbuf[3] & FNAME)
4857                 while ((zbuf[n++] != 0) && (n < len));
4858
4859         bp->strm->next_in = zbuf + n;
4860         bp->strm->avail_in = len - n;
4861         bp->strm->next_out = bp->gunzip_buf;
4862         bp->strm->avail_out = FW_BUF_SIZE;
4863
4864         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4865         if (rc != Z_OK)
4866                 return rc;
4867
4868         rc = zlib_inflate(bp->strm, Z_FINISH);
4869         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4870                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4871                        bp->dev->name, bp->strm->msg);
4872
4873         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4874         if (bp->gunzip_outlen & 0x3)
4875                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4876                                     " gunzip_outlen (%d) not aligned\n",
4877                        bp->dev->name, bp->gunzip_outlen);
4878         bp->gunzip_outlen >>= 2;
4879
4880         zlib_inflateEnd(bp->strm);
4881
4882         if (rc == Z_STREAM_END)
4883                 return 0;
4884
4885         return rc;
4886 }
4887
4888 /* nic load/unload */
4889
4890 /*
4891  * General service functions
4892  */
4893
4894 /* send a NIG loopback debug packet */
4895 static void bnx2x_lb_pckt(struct bnx2x *bp)
4896 {
4897         u32 wb_write[3];
4898
4899         /* Ethernet source and destination addresses */
4900         wb_write[0] = 0x55555555;
4901         wb_write[1] = 0x55555555;
4902         wb_write[2] = 0x20;             /* SOP */
4903         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4904
4905         /* NON-IP protocol */
4906         wb_write[0] = 0x09000000;
4907         wb_write[1] = 0x55555555;
4908         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4909         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4910 }
4911
4912 /* Some of the internal memories are not directly readable from the
4913  * driver; to test them we send debug packets through the NIG
4914  * loopback path.
4915  */
4916 static int bnx2x_int_mem_test(struct bnx2x *bp)
4917 {
4918         int factor;
4919         int count, i;
4920         u32 val = 0;
4921
4922         if (CHIP_REV_IS_FPGA(bp))
4923                 factor = 120;
4924         else if (CHIP_REV_IS_EMUL(bp))
4925                 factor = 200;
4926         else
4927                 factor = 1;
4928
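        /* emulation and FPGA platforms run much slower than real
         * silicon, so stretch all polling timeouts by 'factor' */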
4929         DP(NETIF_MSG_HW, "start part1\n");
4930
4931         /* Disable inputs of parser neighbor blocks */
4932         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4933         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4934         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4935         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4936
4937         /*  Write 0 to parser credits for CFC search request */
4938         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4939
4940         /* send Ethernet packet */
4941         bnx2x_lb_pckt(bp);
4942
4943         /* TODO: do I need to reset the NIG statistics? */
4944         /* Wait until NIG register shows 1 packet of size 0x10 */
4945         count = 1000 * factor;
4946         while (count) {
4947
4948                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4949                 val = *bnx2x_sp(bp, wb_data[0]);
4950                 if (val == 0x10)
4951                         break;
4952
4953                 msleep(10);
4954                 count--;
4955         }
4956         if (val != 0x10) {
4957                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4958                 return -1;
4959         }
4960
4961         /* Wait until PRS register shows 1 packet */
4962         count = 1000 * factor;
4963         while (count) {
4964                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4965                 if (val == 1)
4966                         break;
4967
4968                 msleep(10);
4969                 count--;
4970         }
4971         if (val != 0x1) {
4972                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4973                 return -2;
4974         }
4975
4976         /* Reset and init BRB, PRS */
4977         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4978         msleep(50);
4979         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4980         msleep(50);
4981         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4982         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4983
4984         DP(NETIF_MSG_HW, "part2\n");
4985
4986         /* Disable inputs of parser neighbor blocks */
4987         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4988         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4989         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4990         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4991
4992         /* Write 0 to parser credits for CFC search request */
4993         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4994
4995         /* send 10 Ethernet packets */
4996         for (i = 0; i < 10; i++)
4997                 bnx2x_lb_pckt(bp);
4998
4999         /* Wait until NIG register shows 10 + 1
5000            packets of size 11*0x10 = 0xb0 */
5001         count = 1000 * factor;
5002         while (count) {
5003
5004                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5005                 val = *bnx2x_sp(bp, wb_data[0]);
5006                 if (val == 0xb0)
5007                         break;
5008
5009                 msleep(10);
5010                 count--;
5011         }
5012         if (val != 0xb0) {
5013                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5014                 return -3;
5015         }
5016
5017         /* Wait until PRS register shows 2 packets */
5018         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5019         if (val != 2)
5020                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5021
5022         /* Write 1 to parser credits for CFC search request */
5023         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5024
5025         /* Wait until PRS register shows 3 packets */
5026         msleep(10 * factor);
5027         /* the PRS packet counter should now read 3 */
5028         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5029         if (val != 3)
5030                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5031
5032         /* clear NIG EOP FIFO */
5033         for (i = 0; i < 11; i++)
5034                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5035         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5036         if (val != 1) {
5037                 BNX2X_ERR("clear of NIG failed\n");
5038                 return -4;
5039         }
5040
5041         /* Reset and init BRB, PRS, NIG */
5042         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5043         msleep(50);
5044         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5045         msleep(50);
5046         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5047         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5048 #ifndef BCM_ISCSI
5049         /* set NIC mode */
5050         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5051 #endif
5052
5053         /* Enable inputs of parser neighbor blocks */
5054         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5055         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5056         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5057         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5058
5059         DP(NETIF_MSG_HW, "done\n");
5060
5061         return 0; /* OK */
5062 }
5063
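/* Unmask the per-block attention interrupt sources (writing 0 unmasks
 * all bits); the SEM masks are left untouched and PBF keeps bits 3-4
 * masked.
 */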
5064 static void enable_blocks_attention(struct bnx2x *bp)
5065 {
5066         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5067         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5068         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5069         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5070         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5071         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5072         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5073         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5074         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5075 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5076 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5077         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5078         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5079         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5080 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5081 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5082         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5083         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5084         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5085         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5086 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5087 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5088         if (CHIP_REV_IS_FPGA(bp))
5089                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5090         else
5091                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5092         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5093         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5094         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5095 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5096 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5097         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5098         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5099 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5100         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3, 4 masked */
5101 }
5102
5103
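/* One-time, chip-wide initialization performed by the first driver to
 * load: take all blocks out of reset and run their common init, zero
 * the storm memories and run the internal memory self test on E1.
 */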
5104 static int bnx2x_init_common(struct bnx2x *bp)
5105 {
5106         u32 val, i;
5107
5108         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5109
5110         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5111         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5112
5113         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5114         if (CHIP_IS_E1H(bp))
5115                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5116
5117         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5118         msleep(30);
5119         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5120
5121         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5122         if (CHIP_IS_E1(bp)) {
5123                 /* enable HW interrupt from PXP on USDM overflow
5124                    (bit 16 of INT_MASK_0) */
5125                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5126         }
5127
5128         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5129         bnx2x_init_pxp(bp);
5130
5131 #ifdef __BIG_ENDIAN
5132         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5133         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5134         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5135         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5136         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5137         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5138
5139 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5140         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5141         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5142         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5143         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5144 #endif
5145
5146         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5147 #ifdef BCM_ISCSI
5148         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5149         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5150         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5151 #endif
5152
5153         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5154                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5155
5156         /* let the HW do its magic ... */
5157         msleep(100);
5158         /* finish PXP init */
5159         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5160         if (val != 1) {
5161                 BNX2X_ERR("PXP2 CFG failed\n");
5162                 return -EBUSY;
5163         }
5164         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5165         if (val != 1) {
5166                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5167                 return -EBUSY;
5168         }
5169
5170         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5171         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5172
5173         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5174
5175         /* clean the DMAE memory */
5176         bp->dmae_ready = 1;
5177         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5178
5179         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5180         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5181         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5182         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5183
5184         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5185         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5186         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5187         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5188
5189         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5190         /* soft reset pulse */
5191         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5192         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5193
5194 #ifdef BCM_ISCSI
5195         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5196 #endif
5197
5198         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5199         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5200         if (!CHIP_REV_IS_SLOW(bp)) {
5201                 /* enable hw interrupt from doorbell Q */
5202                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5203         }
5204
5205         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5206         if (CHIP_REV_IS_SLOW(bp)) {
5207                 /* fix for emulation and FPGA: disable pause */
5208                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5209                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5210                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5211                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5212         }
5213
5214         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5215         /* set NIC mode */
5216         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5217         if (CHIP_IS_E1H(bp))
5218                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5219
5220         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5221         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5222         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5223         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5224
5225         if (CHIP_IS_E1H(bp)) {
5226                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5227                                 STORM_INTMEM_SIZE_E1H/2);
5228                 bnx2x_init_fill(bp,
5229                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5230                                 0, STORM_INTMEM_SIZE_E1H/2);
5231                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5232                                 STORM_INTMEM_SIZE_E1H/2);
5233                 bnx2x_init_fill(bp,
5234                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5235                                 0, STORM_INTMEM_SIZE_E1H/2);
5236                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5237                                 STORM_INTMEM_SIZE_E1H/2);
5238                 bnx2x_init_fill(bp,
5239                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5240                                 0, STORM_INTMEM_SIZE_E1H/2);
5241                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5242                                 STORM_INTMEM_SIZE_E1H/2);
5243                 bnx2x_init_fill(bp,
5244                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5245                                 0, STORM_INTMEM_SIZE_E1H/2);
5246         } else { /* E1 */
5247                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5248                                 STORM_INTMEM_SIZE_E1);
5249                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5250                                 STORM_INTMEM_SIZE_E1);
5251                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5252                                 STORM_INTMEM_SIZE_E1);
5253                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5254                                 STORM_INTMEM_SIZE_E1);
5255         }
5256
5257         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5258         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5259         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5260         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5261
5262         /* sync semi rtc */
5263         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5264                0x80000000);
5265         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5266                0x80000000);
5267
5268         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5269         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5270         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5271
5272         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5273         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5274                 REG_WR(bp, i, 0xc0cac01a);
5275                 /* TODO: replace with something meaningful */
5276         }
5277         if (CHIP_IS_E1H(bp))
5278                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5279         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5280
5281         if (sizeof(union cdu_context) != 1024)
5282                 /* we currently assume that a context is 1024 bytes */
5283                 printk(KERN_ALERT PFX "please adjust the size of"
5284                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5285
5286         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5287         val = (4 << 24) + (0 << 12) + 1024;
5288         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5289         if (CHIP_IS_E1(bp)) {
5290                 /* !!! fix pxp client credit until excel update */
5291                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5292                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5293         }
5294
5295         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5296         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5297
5298         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5299         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5300
5301         /* PXPCS COMMON comes here */
5302         /* Reset PCIE errors for debug */
5303         REG_WR(bp, 0x2814, 0xffffffff);
5304         REG_WR(bp, 0x3820, 0xffffffff);
5305
5306         /* EMAC0 COMMON comes here */
5307         /* EMAC1 COMMON comes here */
5308         /* DBU COMMON comes here */
5309         /* DBG COMMON comes here */
5310
5311         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5312         if (CHIP_IS_E1H(bp)) {
5313                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5314                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5315         }
5316
5317         if (CHIP_REV_IS_SLOW(bp))
5318                 msleep(200);
5319
5320         /* finish CFC init */
5321         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5322         if (val != 1) {
5323                 BNX2X_ERR("CFC LL_INIT failed\n");
5324                 return -EBUSY;
5325         }
5326         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5327         if (val != 1) {
5328                 BNX2X_ERR("CFC AC_INIT failed\n");
5329                 return -EBUSY;
5330         }
5331         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5332         if (val != 1) {
5333                 BNX2X_ERR("CFC CAM_INIT failed\n");
5334                 return -EBUSY;
5335         }
5336         REG_WR(bp, CFC_REG_DEBUG0, 0);
5337
        /* read the NIG statistic
           to see if this is the first load since power-up */
5340         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5341         val = *bnx2x_sp(bp, wb_data[0]);
5342
5343         /* do internal memory self test */
5344         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5345                 BNX2X_ERR("internal mem self test failed\n");
5346                 return -EBUSY;
5347         }
5348
5349         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5350         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5351         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5352                 /* Fan failure is indicated by SPIO 5 */
5353                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5354                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5355
5356                 /* set to active low mode */
5357                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5358                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5359                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5360                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5361
5362                 /* enable interrupt to signal the IGU */
5363                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5364                 val |= (1 << MISC_REGISTERS_SPIO_5);
5365                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5366                 break;
5367
5368         default:
5369                 break;
5370         }
5371
5372         /* clear PXP2 attentions */
5373         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5374
5375         enable_blocks_attention(bp);
5376
5377         if (!BP_NOMCP(bp)) {
5378                 bnx2x_acquire_phy_lock(bp);
5379                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5380                 bnx2x_release_phy_lock(bp);
5381         } else
5382                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5383
5384         return 0;
5385 }
5386
5387 static int bnx2x_init_port(struct bnx2x *bp)
5388 {
        int port = BP_PORT(bp);
        u32 val;
#ifdef BCM_ISCSI
        /* the iSCSI blocks below use 'func', 'i' and 'wb_write' without
         * declaring them; these declarations, and i's starting ILT line
         * (0 on port 0, 384 on port 1, per the comments below), are a
         * best-guess reconstruction */
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
        u32 wb_write[2];
#endif
5391
5392         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5393
5394         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5395
5396         /* Port PXP comes here */
5397         /* Port PXP2 comes here */
5398 #ifdef BCM_ISCSI
5399         /* Port0  1
5400          * Port1  385 */
5401         i++;
5402         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5403         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5404         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5405         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5406
5407         /* Port0  2
5408          * Port1  386 */
5409         i++;
5410         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5411         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5412         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5413         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5414
5415         /* Port0  3
5416          * Port1  387 */
5417         i++;
5418         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5419         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5420         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5421         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5422 #endif
5423         /* Port CMs come here */
5424
5425         /* Port QM comes here */
5426 #ifdef BCM_ISCSI
5427         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5428         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5429
5430         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5431                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5432 #endif
5433         /* Port DQ comes here */
5434         /* Port BRB1 comes here */
5435         /* Port PRS comes here */
5436         /* Port TSDM comes here */
5437         /* Port CSDM comes here */
5438         /* Port USDM comes here */
5439         /* Port XSDM comes here */
5440         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5441                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5442         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5443                              port ? USEM_PORT1_END : USEM_PORT0_END);
5444         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5445                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5446         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5447                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5448         /* Port UPB comes here */
5449         /* Port XPB comes here */
5450
5451         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5452                              port ? PBF_PORT1_END : PBF_PORT0_END);
5453
        /* configure PBF to work without PAUSE for MTU 9000 */
5455         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5456
5457         /* update threshold */
5458         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5459         /* update init credit */
5460         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
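        /* the threshold/credit registers apparently count 16-byte
         * units: 9040/16 = 565 covers a 9000-byte MTU plus header
         * margin; the extra 553 - 22 credits are assumed to cover
         * the PBF internal FIFO */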
5461
5462         /* probe changes */
5463         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5464         msleep(5);
5465         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5466
5467 #ifdef BCM_ISCSI
5468         /* tell the searcher where the T2 table is */
5469         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5470
5471         wb_write[0] = U64_LO(bp->t2_mapping);
5472         wb_write[1] = U64_HI(bp->t2_mapping);
5473         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5474         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5475         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5476         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5477
5478         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5479         /* Port SRCH comes here */
5480 #endif
5481         /* Port CDU comes here */
5482         /* Port CFC comes here */
5483
5484         if (CHIP_IS_E1(bp)) {
5485                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5486                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5487         }
5488         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5489                              port ? HC_PORT1_END : HC_PORT0_END);
5490
5491         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5492                                     MISC_AEU_PORT0_START,
5493                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
        /* init aeu_mask_attn_func_0/1:
         *  - SF mode: bits 3-7 are masked; only bits 0-2 are in use
         *  - MF mode: bit 3 is masked; bits 0-2 are in use as in SF,
         *             bits 4-7 are used for "per vn group attention" */
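        /* hence 0x7 (bits 0-2) in SF mode and 0xF7 (bits 0-2 and 4-7,
         * bit 3 masked) in MF mode */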
5498         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5499                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5500
5501         /* Port PXPCS comes here */
5502         /* Port EMAC0 comes here */
5503         /* Port EMAC1 comes here */
5504         /* Port DBU comes here */
5505         /* Port DBG comes here */
5506         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5507                              port ? NIG_PORT1_END : NIG_PORT0_END);
5508
5509         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5510
5511         if (CHIP_IS_E1H(bp)) {
5512                 u32 wsum;
5513                 struct cmng_struct_per_port m_cmng_port;
5514                 int vn;
5515
5516                 /* 0x2 disable e1hov, 0x1 enable */
5517                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5518                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5519
5520                 /* Init RATE SHAPING and FAIRNESS contexts.
5521                    Initialize as if there is 10G link. */
5522                 wsum = bnx2x_calc_vn_wsum(bp);
5523                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5524                 if (IS_E1HMF(bp))
5525                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5526                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5527                                         wsum, 10000, &m_cmng_port);
5528         }
5529
5530         /* Port MCP comes here */
5531         /* Port DMAE comes here */
5532
5533         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5534         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5535         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5536                 /* add SPIO 5 to group 0 */
5537                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5538                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5539                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5540                 break;
5541
5542         default:
5543                 break;
5544         }
5545
5546         bnx2x__link_reset(bp);
5547
5548         return 0;
5549 }
5550
5551 #define ILT_PER_FUNC            (768/2)
5552 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a valid bit (1)
   added at the 53rd bit,
   then, since this is a wide register(TM),
   we split it into two 32-bit writes
 */
5558 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5559 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5560 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5561 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
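/* Worked example (illustrative only): for a DMA address of
 * 0x123456789a000, ONCHIP_ADDR1() yields the low 32 bits of the page
 * number (0x123456789a000 >> 12 = 0x123456789a -> 0x3456789a) and
 * ONCHIP_ADDR2() yields the remaining high bits with the valid bit
 * set: (1 << 20) | (0x123456789a000 >> 44) = 0x100012.  PXP_ONE_ILT()
 * packs the same line number into both the 'first' and 'last' fields
 * of an ILT range register.
 */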
5562
5563 #define CNIC_ILT_LINES          0
5564
5565 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5566 {
5567         int reg;
5568
5569         if (CHIP_IS_E1H(bp))
5570                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5571         else /* E1 */
5572                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5573
5574         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5575 }
5576
5577 static int bnx2x_init_func(struct bnx2x *bp)
5578 {
5579         int port = BP_PORT(bp);
5580         int func = BP_FUNC(bp);
5581         int i;
5582
5583         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5584
5585         i = FUNC_ILT_BASE(func);
5586
5587         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
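        /* program the CDU ILT range: E1H has dedicated first/last
         * registers, while E1 packs both bounds into a single register
         * via PXP_ILT_RANGE() */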
5588         if (CHIP_IS_E1H(bp)) {
5589                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5590                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5591         } else /* E1 */
5592                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5593                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5594
5595
5596         if (CHIP_IS_E1H(bp)) {
5597                 for (i = 0; i < 9; i++)
5598                         bnx2x_init_block(bp,
5599                                          cm_start[func][i], cm_end[func][i]);
5600
5601                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5602                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5603         }
5604
5605         /* HC init per function */
5606         if (CHIP_IS_E1H(bp)) {
5607                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5608
5609                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5610                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5611         }
5612         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5613
5614         if (CHIP_IS_E1H(bp))
5615                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5616
5617         /* Reset PCIE errors for debug */
5618         REG_WR(bp, 0x2114, 0xffffffff);
5619         REG_WR(bp, 0x2120, 0xffffffff);
5620
5621         return 0;
5622 }
5623
5624 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5625 {
5626         int i, rc = 0;
5627
5628         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5629            BP_FUNC(bp), load_code);
5630
5631         bp->dmae_ready = 0;
5632         mutex_init(&bp->dmae_mutex);
5633         bnx2x_gunzip_init(bp);
5634
5635         switch (load_code) {
5636         case FW_MSG_CODE_DRV_LOAD_COMMON:
5637                 rc = bnx2x_init_common(bp);
5638                 if (rc)
5639                         goto init_hw_err;
5640                 /* no break */
5641
5642         case FW_MSG_CODE_DRV_LOAD_PORT:
5643                 bp->dmae_ready = 1;
5644                 rc = bnx2x_init_port(bp);
5645                 if (rc)
5646                         goto init_hw_err;
5647                 /* no break */
5648
5649         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5650                 bp->dmae_ready = 1;
5651                 rc = bnx2x_init_func(bp);
5652                 if (rc)
5653                         goto init_hw_err;
5654                 break;
5655
5656         default:
5657                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5658                 break;
5659         }
5660
5661         if (!BP_NOMCP(bp)) {
5662                 int func = BP_FUNC(bp);
5663
5664                 bp->fw_drv_pulse_wr_seq =
5665                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5666                                  DRV_PULSE_SEQ_MASK);
5667                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5668                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5669                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5670         } else
5671                 bp->func_stx = 0;
5672
5673         /* this needs to be done before gunzip end */
5674         bnx2x_zero_def_sb(bp);
5675         for_each_queue(bp, i)
5676                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5677
5678 init_hw_err:
5679         bnx2x_gunzip_end(bp);
5680
5681         return rc;
5682 }
5683
5684 /* send the MCP a request, block until there is a reply */
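/* The handshake: write (command | seq) to this function's driver
 * mailbox header, then poll the firmware mailbox until the echoed
 * sequence number matches; the return value is the FW message code,
 * or 0 if the firmware never responds.
 */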
5685 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5686 {
5687         int func = BP_FUNC(bp);
5688         u32 seq = ++bp->fw_seq;
5689         u32 rc = 0;
5690         u32 cnt = 1;
5691         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5692
5693         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5694         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5695
5696         do {
                /* let the FW do its magic ... */
5698                 msleep(delay);
5699
5700                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5701
                /* Give the FW up to 2 seconds (200 * 10ms; longer on slow chips) */
5703         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5704
5705         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5706            cnt*delay, rc, seq);
5707
5708         /* is this a reply to our command? */
5709         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5710                 rc &= FW_MSG_CODE_MASK;
5711
5712         } else {
5713                 /* FW BUG! */
5714                 BNX2X_ERR("FW failed to respond!\n");
5715                 bnx2x_fw_dump(bp);
5716                 rc = 0;
5717         }
5718
5719         return rc;
5720 }
5721
5722 static void bnx2x_free_mem(struct bnx2x *bp)
5723 {
5724
5725 #define BNX2X_PCI_FREE(x, y, size) \
5726         do { \
5727                 if (x) { \
5728                         pci_free_consistent(bp->pdev, size, x, y); \
5729                         x = NULL; \
5730                         y = 0; \
5731                 } \
5732         } while (0)
5733
5734 #define BNX2X_FREE(x) \
5735         do { \
5736                 if (x) { \
5737                         vfree(x); \
5738                         x = NULL; \
5739                 } \
5740         } while (0)
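/* both helpers NULL the pointer (and zero the DMA handle) after
 * freeing, so calling bnx2x_free_mem() twice, or on a partially
 * allocated bp, is safe */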
5741
5742         int i;
5743
5744         /* fastpath */
5745         for_each_queue(bp, i) {
5746
5747                 /* Status blocks */
5748                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5749                                bnx2x_fp(bp, i, status_blk_mapping),
5750                                sizeof(struct host_status_block) +
5751                                sizeof(struct eth_tx_db_data));
5752
5753                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5754                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5755                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5756                                bnx2x_fp(bp, i, tx_desc_mapping),
5757                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5758
5759                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5760                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5761                                bnx2x_fp(bp, i, rx_desc_mapping),
5762                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5763
5764                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5765                                bnx2x_fp(bp, i, rx_comp_mapping),
5766                                sizeof(struct eth_fast_path_rx_cqe) *
5767                                NUM_RCQ_BD);
5768
5769                 /* SGE ring */
5770                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5771                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5772                                bnx2x_fp(bp, i, rx_sge_mapping),
5773                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5774         }
5775         /* end of fastpath */
5776
5777         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5778                        sizeof(struct host_def_status_block));
5779
5780         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5781                        sizeof(struct bnx2x_slowpath));
5782
5783 #ifdef BCM_ISCSI
5784         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5785         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5786         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5787         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5788 #endif
5789         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5790
5791 #undef BNX2X_PCI_FREE
#undef BNX2X_FREE
5793 }
5794
5795 static int bnx2x_alloc_mem(struct bnx2x *bp)
5796 {
5797
5798 #define BNX2X_PCI_ALLOC(x, y, size) \
5799         do { \
5800                 x = pci_alloc_consistent(bp->pdev, size, y); \
5801                 if (x == NULL) \
5802                         goto alloc_mem_err; \
5803                 memset(x, 0, size); \
5804         } while (0)
5805
5806 #define BNX2X_ALLOC(x, size) \
5807         do { \
5808                 x = vmalloc(size); \
5809                 if (x == NULL) \
5810                         goto alloc_mem_err; \
5811                 memset(x, 0, size); \
5812         } while (0)
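/* mirrors of the free macros above: both allocators zero the new
 * memory and jump to alloc_mem_err on failure, where bnx2x_free_mem()
 * releases whatever has been allocated so far */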
5813
5814         int i;
5815
5816         /* fastpath */
5817         for_each_queue(bp, i) {
5818                 bnx2x_fp(bp, i, bp) = bp;
5819
5820                 /* Status blocks */
5821                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5822                                 &bnx2x_fp(bp, i, status_blk_mapping),
5823                                 sizeof(struct host_status_block) +
5824                                 sizeof(struct eth_tx_db_data));
5825
5826                 bnx2x_fp(bp, i, hw_tx_prods) =
5827                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5828
5829                 bnx2x_fp(bp, i, tx_prods_mapping) =
5830                                 bnx2x_fp(bp, i, status_blk_mapping) +
5831                                 sizeof(struct host_status_block);
5832
5833                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5834                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5835                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5836                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5837                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5838                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5839
5840                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5841                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5842                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5843                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5844                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5845
5846                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5847                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5848                                 sizeof(struct eth_fast_path_rx_cqe) *
5849                                 NUM_RCQ_BD);
5850
5851                 /* SGE ring */
5852                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5853                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5854                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5855                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5856                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5857         }
5858         /* end of fastpath */
5859
5860         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5861                         sizeof(struct host_def_status_block));
5862
5863         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5864                         sizeof(struct bnx2x_slowpath));
5865
5866 #ifdef BCM_ISCSI
5867         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5868
5869         /* Initialize T1 */
5870         for (i = 0; i < 64*1024; i += 64) {
5871                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5872                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5873         }
5874
        /* allocate the searcher T2 table;
           we allocate 1/4 of the T1 allocation for T2
           (which is not entered into the ILT) */
5878         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5879
5880         /* Initialize T2 */
5881         for (i = 0; i < 16*1024; i += 64)
                *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5883
        /* now fix up the last line of the last block to point back to
           the start of T2 */
5885         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5886
5887         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5888         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5889
5890         /* QM queues (128*MAX_CONN) */
5891         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5892 #endif
5893
5894         /* Slow path ring */
5895         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5896
5897         return 0;
5898
5899 alloc_mem_err:
5900         bnx2x_free_mem(bp);
5901         return -ENOMEM;
5902
5903 #undef BNX2X_PCI_ALLOC
5904 #undef BNX2X_ALLOC
5905 }
5906
5907 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5908 {
5909         int i;
5910
5911         for_each_queue(bp, i) {
5912                 struct bnx2x_fastpath *fp = &bp->fp[i];
5913
5914                 u16 bd_cons = fp->tx_bd_cons;
5915                 u16 sw_prod = fp->tx_pkt_prod;
5916                 u16 sw_cons = fp->tx_pkt_cons;
5917
5918                 while (sw_cons != sw_prod) {
5919                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5920                         sw_cons++;
5921                 }
5922         }
5923 }
5924
5925 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5926 {
5927         int i, j;
5928
5929         for_each_queue(bp, j) {
5930                 struct bnx2x_fastpath *fp = &bp->fp[j];
5931
5932                 for (i = 0; i < NUM_RX_BD; i++) {
5933                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5934                         struct sk_buff *skb = rx_buf->skb;
5935
5936                         if (skb == NULL)
5937                                 continue;
5938
5939                         pci_unmap_single(bp->pdev,
5940                                          pci_unmap_addr(rx_buf, mapping),
5941                                          bp->rx_buf_size,
5942                                          PCI_DMA_FROMDEVICE);
5943
5944                         rx_buf->skb = NULL;
5945                         dev_kfree_skb(skb);
5946                 }
5947                 if (!fp->disable_tpa)
5948                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5949                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5950                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5951         }
5952 }
5953
5954 static void bnx2x_free_skbs(struct bnx2x *bp)
5955 {
5956         bnx2x_free_tx_skbs(bp);
5957         bnx2x_free_rx_skbs(bp);
5958 }
5959
5960 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5961 {
5962         int i, offset = 1;
5963
5964         free_irq(bp->msix_table[0].vector, bp->dev);
5965         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5966            bp->msix_table[0].vector);
5967
5968         for_each_queue(bp, i) {
5969                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5970                    "state %x\n", i, bp->msix_table[i + offset].vector,
5971                    bnx2x_fp(bp, i, state));
5972
5973                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5974                         BNX2X_ERR("IRQ of fp #%d being freed while "
5975                                   "state != closed\n", i);
5976
5977                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5978         }
5979 }
5980
5981 static void bnx2x_free_irq(struct bnx2x *bp)
5982 {
5983         if (bp->flags & USING_MSIX_FLAG) {
5984                 bnx2x_free_msix_irqs(bp);
5985                 pci_disable_msix(bp->pdev);
5986                 bp->flags &= ~USING_MSIX_FLAG;
5987
5988         } else
5989                 free_irq(bp->pdev->irq, bp->dev);
5990 }
5991
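/* vector layout: msix_table[0] is the slowpath (default SB) vector,
 * entries 1..num_queues are the fastpath vectors, with IGU vector
 * numbers offset by this function's base L_ID */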
5992 static int bnx2x_enable_msix(struct bnx2x *bp)
5993 {
5994         int i, rc, offset;
5995
5996         bp->msix_table[0].entry = 0;
5997         offset = 1;
5998         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5999
6000         for_each_queue(bp, i) {
6001                 int igu_vec = offset + i + BP_L_ID(bp);
6002
6003                 bp->msix_table[i + offset].entry = igu_vec;
6004                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6005                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6006         }
6007
6008         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6009                              bp->num_queues + offset);
6010         if (rc) {
6011                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6012                 return -1;
6013         }
6014         bp->flags |= USING_MSIX_FLAG;
6015
6016         return 0;
6017 }
6018
6019 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6020 {
6021         int i, rc, offset = 1;
6022
6023         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6024                          bp->dev->name, bp->dev);
6025         if (rc) {
6026                 BNX2X_ERR("request sp irq failed\n");
6027                 return -EBUSY;
6028         }
6029
6030         for_each_queue(bp, i) {
6031                 rc = request_irq(bp->msix_table[i + offset].vector,
6032                                  bnx2x_msix_fp_int, 0,
6033                                  bp->dev->name, &bp->fp[i]);
6034                 if (rc) {
6035                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6036                                   i + offset, -rc);
6037                         bnx2x_free_msix_irqs(bp);
6038                         return -EBUSY;
6039                 }
6040
6041                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6042         }
6043
6044         return 0;
6045 }
6046
6047 static int bnx2x_req_irq(struct bnx2x *bp)
6048 {
6049         int rc;
6050
6051         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6052                          bp->dev->name, bp->dev);
6053         if (!rc)
6054                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6055
6056         return rc;
6057 }
6058
6059 static void bnx2x_napi_enable(struct bnx2x *bp)
6060 {
6061         int i;
6062
6063         for_each_queue(bp, i)
6064                 napi_enable(&bnx2x_fp(bp, i, napi));
6065 }
6066
6067 static void bnx2x_napi_disable(struct bnx2x *bp)
6068 {
6069         int i;
6070
6071         for_each_queue(bp, i)
6072                 napi_disable(&bnx2x_fp(bp, i, napi));
6073 }
6074
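/* bp->intr_sem acts as a nested disable count: NAPI and interrupts
 * are re-armed only when atomic_dec_and_test() brings it back to
 * zero, i.e. when the last disabler releases it */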
6075 static void bnx2x_netif_start(struct bnx2x *bp)
6076 {
6077         if (atomic_dec_and_test(&bp->intr_sem)) {
6078                 if (netif_running(bp->dev)) {
6079                         if (bp->state == BNX2X_STATE_OPEN)
6080                                 netif_wake_queue(bp->dev);
6081                         bnx2x_napi_enable(bp);
6082                         bnx2x_int_enable(bp);
6083                 }
6084         }
6085 }
6086
6087 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6088 {
6089         bnx2x_int_disable_sync(bp, disable_hw);
6090         if (netif_running(bp->dev)) {
6091                 bnx2x_napi_disable(bp);
6092                 netif_tx_disable(bp->dev);
6093                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6094         }
6095 }
6096
6097 /*
6098  * Init service functions
6099  */
6100
6101 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6102 {
6103         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6104         int port = BP_PORT(bp);
6105
6106         /* CAM allocation
6107          * unicasts 0-31:port0 32-63:port1
6108          * multicast 64-127:port0 128-191:port1
6109          */
6110         config->hdr.length_6b = 2;
        config->hdr.offset = port ? 32 : 0;     /* per the CAM map above */
6112         config->hdr.client_id = BP_CL_ID(bp);
6113         config->hdr.reserved1 = 0;
6114
6115         /* primary MAC */
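        /* swab16() swaps each 16-bit chunk so that the first MAC byte
         * lands in the high byte; e.g. on a little-endian host, MAC
         * 00:11:22:33:44:55 becomes msb 0x0011, middle 0x2233,
         * lsb 0x4455 */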
6116         config->config_table[0].cam_entry.msb_mac_addr =
6117                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6118         config->config_table[0].cam_entry.middle_mac_addr =
6119                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6120         config->config_table[0].cam_entry.lsb_mac_addr =
6121                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6122         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6123         if (set)
6124                 config->config_table[0].target_table_entry.flags = 0;
6125         else
6126                 CAM_INVALIDATE(config->config_table[0]);
6127         config->config_table[0].target_table_entry.client_id = 0;
6128         config->config_table[0].target_table_entry.vlan_id = 0;
6129
6130         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6131            (set ? "setting" : "clearing"),
6132            config->config_table[0].cam_entry.msb_mac_addr,
6133            config->config_table[0].cam_entry.middle_mac_addr,
6134            config->config_table[0].cam_entry.lsb_mac_addr);
6135
6136         /* broadcast */
6137         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6138         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6139         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6140         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6141         if (set)
6142                 config->config_table[1].target_table_entry.flags =
6143                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6144         else
6145                 CAM_INVALIDATE(config->config_table[1]);
6146         config->config_table[1].target_table_entry.client_id = 0;
6147         config->config_table[1].target_table_entry.vlan_id = 0;
6148
6149         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6150                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6151                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6152 }
6153
6154 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6155 {
6156         struct mac_configuration_cmd_e1h *config =
6157                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6158
6159         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6160                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6161                 return;
6162         }
6163
6164         /* CAM allocation for E1H
6165          * unicasts: by func number
6166          * multicast: 20+FUNC*20, 20 each
6167          */
6168         config->hdr.length_6b = 1;
6169         config->hdr.offset = BP_FUNC(bp);
6170         config->hdr.client_id = BP_CL_ID(bp);
6171         config->hdr.reserved1 = 0;
6172
6173         /* primary MAC */
6174         config->config_table[0].msb_mac_addr =
6175                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6176         config->config_table[0].middle_mac_addr =
6177                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6178         config->config_table[0].lsb_mac_addr =
6179                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6180         config->config_table[0].client_id = BP_L_ID(bp);
6181         config->config_table[0].vlan_id = 0;
6182         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6183         if (set)
6184                 config->config_table[0].flags = BP_PORT(bp);
6185         else
6186                 config->config_table[0].flags =
6187                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6188
6189         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6190            (set ? "setting" : "clearing"),
6191            config->config_table[0].msb_mac_addr,
6192            config->config_table[0].middle_mac_addr,
6193            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6194
6195         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6196                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6197                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6198 }
6199
6200 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6201                              int *state_p, int poll)
6202 {
6203         /* can take a while if any port is running */
6204         int cnt = 500;
6205
6206         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6207            poll ? "polling" : "waiting", state, idx);
6208
6209         might_sleep();
6210         while (cnt--) {
6211                 if (poll) {
6212                         bnx2x_rx_int(bp->fp, 10);
                        /* if the index is different from 0,
                         * the reply for some commands will
                         * be on the non-default queue
                         */
6217                         if (idx)
6218                                 bnx2x_rx_int(&bp->fp[idx], 10);
6219                 }
6220
6221                 mb(); /* state is changed by bnx2x_sp_event() */
6222                 if (*state_p == state)
6223                         return 0;
6224
6225                 msleep(1);
6226         }
6227
6228         /* timeout! */
6229         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6230                   poll ? "polling" : "waiting", state, idx);
6231 #ifdef BNX2X_STOP_ON_ERROR
6232         bnx2x_panic();
6233 #endif
6234
6235         return -EBUSY;
6236 }
6237
6238 static int bnx2x_setup_leading(struct bnx2x *bp)
6239 {
6240         int rc;
6241
6242         /* reset IGU state */
6243         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6244
6245         /* SETUP ramrod */
6246         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6247
6248         /* Wait for completion */
6249         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6250
6251         return rc;
6252 }
6253
6254 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6255 {
6256         /* reset IGU state */
6257         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6258
6259         /* SETUP ramrod */
6260         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6261         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6262
6263         /* Wait for completion */
6264         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6265                                  &(bp->fp[index].state), 0);
6266 }
6267
6268 static int bnx2x_poll(struct napi_struct *napi, int budget);
6269 static void bnx2x_set_rx_mode(struct net_device *dev);
6270
6271 /* must be called with rtnl_lock */
6272 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6273 {
6274         u32 load_code;
6275         int i, rc;
6276 #ifdef BNX2X_STOP_ON_ERROR
6277         if (unlikely(bp->panic))
6278                 return -EPERM;
6279 #endif
6280
6281         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6282
        /* Send the LOAD_REQUEST command to the MCP.
           The reply indicates which type of LOAD to perform:
           if this is the first port to be initialized,
           the common blocks must be initialized as well; otherwise not.
        */
6288         if (!BP_NOMCP(bp)) {
6289                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6290                 if (!load_code) {
6291                         BNX2X_ERR("MCP response failure, aborting\n");
6292                         return -EBUSY;
6293                 }
6294                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6295                         return -EBUSY; /* other port in diagnostic mode */
6296
6297         } else {
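                /* no MCP: emulate its load accounting locally.
                 * load_count[0] counts all functions, load_count[1 + port]
                 * counts them per port; the first load overall initializes
                 * the COMMON blocks, the first on a port the PORT blocks,
                 * anything else only the FUNCTION */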
6298                 int port = BP_PORT(bp);
6299
6300                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6301                    load_count[0], load_count[1], load_count[2]);
6302                 load_count[0]++;
6303                 load_count[1 + port]++;
6304                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6305                    load_count[0], load_count[1], load_count[2]);
6306                 if (load_count[0] == 1)
6307                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6308                 else if (load_count[1 + port] == 1)
6309                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6310                 else
6311                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6312         }
6313
6314         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6315             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6316                 bp->port.pmf = 1;
6317         else
6318                 bp->port.pmf = 0;
6319         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6320
        /* if we can't use MSI-X we only need one fp,
         * so try to enable MSI-X with the requested number of fp's
         * and fall back to INT#A with one fp
         */
6325         if (use_inta) {
6326                 bp->num_queues = 1;
6327
6328         } else {
6329                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6330                         /* user requested number */
6331                         bp->num_queues = use_multi;
6332
6333                 else if (use_multi)
6334                         bp->num_queues = min_t(u32, num_online_cpus(),
6335                                                BP_MAX_QUEUES(bp));
6336                 else
6337                         bp->num_queues = 1;
6338
6339                 if (bnx2x_enable_msix(bp)) {
6340                         /* failed to enable MSI-X */
6341                         bp->num_queues = 1;
6342                         if (use_multi)
6343                                 BNX2X_ERR("Multi requested but failed"
6344                                           " to enable MSI-X\n");
6345                 }
6346         }
6347         DP(NETIF_MSG_IFUP,
6348            "set number of queues to %d\n", bp->num_queues);
6349
6350         if (bnx2x_alloc_mem(bp))
6351                 return -ENOMEM;
6352
6353         for_each_queue(bp, i)
6354                 bnx2x_fp(bp, i, disable_tpa) =
6355                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6356
6357         if (bp->flags & USING_MSIX_FLAG) {
6358                 rc = bnx2x_req_msix_irqs(bp);
6359                 if (rc) {
6360                         pci_disable_msix(bp->pdev);
6361                         goto load_error;
6362                 }
6363         } else {
6364                 bnx2x_ack_int(bp);
6365                 rc = bnx2x_req_irq(bp);
6366                 if (rc) {
6367                         BNX2X_ERR("IRQ request failed, aborting\n");
6368                         goto load_error;
6369                 }
6370         }
6371
6372         for_each_queue(bp, i)
6373                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6374                                bnx2x_poll, 128);
6375
6376         /* Initialize HW */
6377         rc = bnx2x_init_hw(bp, load_code);
6378         if (rc) {
6379                 BNX2X_ERR("HW init failed, aborting\n");
6380                 goto load_int_disable;
6381         }
6382
6383         /* Setup NIC internals and enable interrupts */
6384         bnx2x_nic_init(bp, load_code);
6385
6386         /* Send LOAD_DONE command to MCP */
6387         if (!BP_NOMCP(bp)) {
6388                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6389                 if (!load_code) {
6390                         BNX2X_ERR("MCP response failure, aborting\n");
6391                         rc = -EBUSY;
6392                         goto load_rings_free;
6393                 }
6394         }
6395
6396         bnx2x_stats_init(bp);
6397
6398         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6399
        /* Enable Rx interrupt handling before sending the ramrod,
           as its completion is delivered on an Rx FP queue */
6402         bnx2x_napi_enable(bp);
6403
6404         /* Enable interrupt handling */
6405         atomic_set(&bp->intr_sem, 0);
6406
6407         rc = bnx2x_setup_leading(bp);
6408         if (rc) {
6409                 BNX2X_ERR("Setup leading failed!\n");
6410                 goto load_netif_stop;
6411         }
6412
6413         if (CHIP_IS_E1H(bp))
6414                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6415                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6416                         bp->state = BNX2X_STATE_DISABLED;
6417                 }
6418
6419         if (bp->state == BNX2X_STATE_OPEN)
6420                 for_each_nondefault_queue(bp, i) {
6421                         rc = bnx2x_setup_multi(bp, i);
6422                         if (rc)
6423                                 goto load_netif_stop;
6424                 }
6425
6426         if (CHIP_IS_E1(bp))
6427                 bnx2x_set_mac_addr_e1(bp, 1);
6428         else
6429                 bnx2x_set_mac_addr_e1h(bp, 1);
6430
6431         if (bp->port.pmf)
6432                 bnx2x_initial_phy_init(bp);
6433
6434         /* Start fast path */
6435         switch (load_mode) {
6436         case LOAD_NORMAL:
                /* the Tx queue should only be re-enabled */
6438                 netif_wake_queue(bp->dev);
6439                 bnx2x_set_rx_mode(bp->dev);
6440                 break;
6441
6442         case LOAD_OPEN:
6443                 netif_start_queue(bp->dev);
6444                 bnx2x_set_rx_mode(bp->dev);
6445                 if (bp->flags & USING_MSIX_FLAG)
6446                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6447                                bp->dev->name);
6448                 break;
6449
6450         case LOAD_DIAG:
6451                 bnx2x_set_rx_mode(bp->dev);
6452                 bp->state = BNX2X_STATE_DIAG;
6453                 break;
6454
6455         default:
6456                 break;
6457         }
6458
6459         if (!bp->port.pmf)
6460                 bnx2x__link_status_update(bp);
6461
6462         /* start the timer */
6463         mod_timer(&bp->timer, jiffies + bp->current_interval);
6464
6465
6466         return 0;
6467
6468 load_netif_stop:
6469         bnx2x_napi_disable(bp);
6470 load_rings_free:
6471         /* Free SKBs, SGEs, TPA pool and driver internals */
6472         bnx2x_free_skbs(bp);
6473         for_each_queue(bp, i)
6474                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6475 load_int_disable:
6476         bnx2x_int_disable_sync(bp, 1);
6477         /* Release IRQs */
6478         bnx2x_free_irq(bp);
6479 load_error:
6480         bnx2x_free_mem(bp);
6481         bp->port.pmf = 0;
6482
6483         /* TBD we really need to reset the chip
6484            if we want to recover from this */
6485         return rc;
6486 }
6487
6488 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6489 {
6490         int rc;
6491
6492         /* halt the connection */
6493         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6494         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6495
6496         /* Wait for completion */
6497         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6498                                &(bp->fp[index].state), 1);
6499         if (rc) /* timeout */
6500                 return rc;
6501
6502         /* delete cfc entry */
6503         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6504
6505         /* Wait for completion */
6506         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6507                                &(bp->fp[index].state), 1);
6508         return rc;
6509 }
6510
6511 static int bnx2x_stop_leading(struct bnx2x *bp)
6512 {
6513         u16 dsb_sp_prod_idx;
6514         /* if the other port is handling traffic,
6515            this can take a lot of time */
6516         int cnt = 500;
6517         int rc;
6518
6519         might_sleep();
6520
6521         /* Send HALT ramrod */
6522         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6523         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6524
6525         /* Wait for completion */
6526         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6527                                &(bp->fp[0].state), 1);
6528         if (rc) /* timeout */
6529                 return rc;
6530
6531         dsb_sp_prod_idx = *bp->dsb_sp_prod;
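        /* snapshot the default SB slowpath producer; the PORT_DEL
           completion is detected below by watching it advance */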
6532
6533         /* Send PORT_DELETE ramrod */
6534         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6535
        /* Wait for the completion to arrive on the default status block.
           We are going to reset the chip anyway,
           so there is not much to do if this times out.
         */
6540         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6541                 if (!cnt) {
6542                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6543                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6544                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6545 #ifdef BNX2X_STOP_ON_ERROR
6546                         bnx2x_panic();
6547 #else
6548                         rc = -EBUSY;
6549 #endif
6550                         break;
6551                 }
6552                 cnt--;
6553                 msleep(1);
6554         }
6555         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6556         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6557
6558         return rc;
6559 }
6560
6561 static void bnx2x_reset_func(struct bnx2x *bp)
6562 {
6563         int port = BP_PORT(bp);
6564         int func = BP_FUNC(bp);
6565         int base, i;
6566
6567         /* Configure IGU */
6568         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6569         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6570
6571         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6572
6573         /* Clear ILT */
6574         base = FUNC_ILT_BASE(func);
6575         for (i = base; i < base + ILT_PER_FUNC; i++)
6576                 bnx2x_ilt_wr(bp, i, 0);
6577 }
6578
6579 static void bnx2x_reset_port(struct bnx2x *bp)
6580 {
6581         int port = BP_PORT(bp);
6582         u32 val;
6583
6584         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6585
6586         /* Do not rcv packets to BRB */
6587         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6588         /* Do not direct rcv packets that are not for MCP to the BRB */
6589         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6590                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6591
6592         /* Configure AEU */
6593         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6594
6595         msleep(100);
6596         /* Check for BRB port occupancy */
6597         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6598         if (val)
6599                 DP(NETIF_MSG_IFDOWN,
6600                    "BRB1 is not empty  %d blocks are occupied\n", val);
6601
6602         /* TODO: Close Doorbell port? */
6603 }
6604
6605 static void bnx2x_reset_common(struct bnx2x *bp)
6606 {
6607         /* reset_common */
6608         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6609                0xd3ffff7f);
6610         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6611 }
6612
6613 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6614 {
6615         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6616            BP_FUNC(bp), reset_code);
6617
6618         switch (reset_code) {
6619         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6620                 bnx2x_reset_port(bp);
6621                 bnx2x_reset_func(bp);
6622                 bnx2x_reset_common(bp);
6623                 break;
6624
6625         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6626                 bnx2x_reset_port(bp);
6627                 bnx2x_reset_func(bp);
6628                 break;
6629
6630         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6631                 bnx2x_reset_func(bp);
6632                 break;
6633
6634         default:
6635                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6636                 break;
6637         }
6638 }
6639
6640 /* must be called with rtnl_lock */
6641 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6642 {
6643         int port = BP_PORT(bp);
6644         u32 reset_code = 0;
6645         int i, cnt, rc;
6646
6647         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6648
6649         bp->rx_mode = BNX2X_RX_MODE_NONE;
6650         bnx2x_set_storm_rx_mode(bp);
6651
6652         bnx2x_netif_stop(bp, 1);
6653         if (!netif_running(bp->dev))
6654                 bnx2x_napi_disable(bp);
6655         del_timer_sync(&bp->timer);
6656         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6657                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6658         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6659
6660         /* Wait until tx fast path tasks complete */
6661         for_each_queue(bp, i) {
6662                 struct bnx2x_fastpath *fp = &bp->fp[i];
6663
6664                 cnt = 1000;
6665                 smp_rmb();
6666                 while (BNX2X_HAS_TX_WORK(fp)) {
6667
6668                         bnx2x_tx_int(fp, 1000);
6669                         if (!cnt) {
6670                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6671                                           i);
6672 #ifdef BNX2X_STOP_ON_ERROR
6673                                 bnx2x_panic();
6674                                 return -EBUSY;
6675 #else
6676                                 break;
6677 #endif
6678                         }
6679                         cnt--;
6680                         msleep(1);
6681                         smp_rmb();
6682                 }
6683         }
6684         /* Give HW time to discard old tx messages */
6685         msleep(1);
6686
6687         /* Release IRQs */
6688         bnx2x_free_irq(bp);
6689
6690         if (CHIP_IS_E1(bp)) {
6691                 struct mac_configuration_cmd *config =
6692                                                 bnx2x_sp(bp, mcast_config);
6693
6694                 bnx2x_set_mac_addr_e1(bp, 0);
6695
6696                 for (i = 0; i < config->hdr.length_6b; i++)
6697                         CAM_INVALIDATE(config->config_table[i]);
6698
6699                 config->hdr.length_6b = i;
6700                 if (CHIP_REV_IS_SLOW(bp))
6701                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6702                 else
6703                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6704                 config->hdr.client_id = BP_CL_ID(bp);
6705                 config->hdr.reserved1 = 0;
6706
6707                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6708                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6709                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6710
6711         } else { /* E1H */
6712                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6713
6714                 bnx2x_set_mac_addr_e1h(bp, 0);
6715
6716                 for (i = 0; i < MC_HASH_SIZE; i++)
6717                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6718         }
6719
6720         if (unload_mode == UNLOAD_NORMAL)
6721                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6722
6723         else if (bp->flags & NO_WOL_FLAG) {
6724                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6725                 if (CHIP_IS_E1H(bp))
6726                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6727
6728         } else if (bp->wol) {
6729                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6730                 u8 *mac_addr = bp->dev->dev_addr;
6731                 u32 val;
                /* The MAC address is written to entries 1-4 to
                   preserve entry 0, which is used by the PMF */
6734                 u8 entry = (BP_E1HVN(bp) + 1)*8;
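                /* each MAC_MATCH entry is 8 bytes wide, so e.g. VN 0
                   writes at byte offset 8 (entry 1) and VN 3 at
                   offset 32 (entry 4) */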
6735
6736                 val = (mac_addr[0] << 8) | mac_addr[1];
6737                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6738
6739                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6740                       (mac_addr[4] << 8) | mac_addr[5];
6741                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6742
6743                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6744
6745         } else
6746                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6747
        /* Close the multi and leading connections.
           Completions for the ramrods are collected synchronously */
6750         for_each_nondefault_queue(bp, i)
6751                 if (bnx2x_stop_multi(bp, i))
6752                         goto unload_error;
6753
6754         rc = bnx2x_stop_leading(bp);
6755         if (rc) {
6756                 BNX2X_ERR("Stop leading failed!\n");
6757 #ifdef BNX2X_STOP_ON_ERROR
6758                 return -EBUSY;
6759 #else
6760                 goto unload_error;
6761 #endif
6762         }
6763
6764 unload_error:
6765         if (!BP_NOMCP(bp))
6766                 reset_code = bnx2x_fw_command(bp, reset_code);
6767         else {
6768                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6769                    load_count[0], load_count[1], load_count[2]);
6770                 load_count[0]--;
6771                 load_count[1 + port]--;
6772                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6773                    load_count[0], load_count[1], load_count[2]);
6774                 if (load_count[0] == 0)
6775                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6776                 else if (load_count[1 + port] == 0)
6777                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6778                 else
6779                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6780         }
6781
6782         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6783             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6784                 bnx2x__link_reset(bp);
6785
6786         /* Reset the chip */
6787         bnx2x_reset_chip(bp, reset_code);
6788
6789         /* Report UNLOAD_DONE to MCP */
6790         if (!BP_NOMCP(bp))
6791                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6792         bp->port.pmf = 0;
6793
6794         /* Free SKBs, SGEs, TPA pool and driver internals */
6795         bnx2x_free_skbs(bp);
6796         for_each_queue(bp, i)
6797                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6798         bnx2x_free_mem(bp);
6799
6800         bp->state = BNX2X_STATE_CLOSED;
6801
6802         netif_carrier_off(bp->dev);
6803
6804         return 0;
6805 }
6806
6807 static void bnx2x_reset_task(struct work_struct *work)
6808 {
6809         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6810
6811 #ifdef BNX2X_STOP_ON_ERROR
6812         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6813                   " so reset not done to allow debug dump;\n"
6814          KERN_ERR " you will need to reboot when done\n");
6815         return;
6816 #endif
6817
6818         rtnl_lock();
6819
6820         if (!netif_running(bp->dev))
6821                 goto reset_task_exit;
6822
6823         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6824         bnx2x_nic_load(bp, LOAD_NORMAL);
6825
6826 reset_task_exit:
6827         rtnl_unlock();
6828 }
6829
6830 /* end of nic load/unload */
6831
6832 /* ethtool_ops */
6833
6834 /*
6835  * Init service functions
6836  */
6837
6838 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6839 {
6840         u32 val;
6841
6842         /* Check if there is any driver already loaded */
6843         val = REG_RD(bp, MISC_REG_UNPREPARED);
6844         if (val == 0x1) {
6845                 /* Check if it is the UNDI driver:
6846                  * UNDI initializes the CID offset for the normal doorbell to 0x7
6847                  */
6848                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6849                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6850                 if (val == 0x7)
6851                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6852                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6853
6854                 if (val == 0x7) {
6855                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6856                         /* save our func */
6857                         int func = BP_FUNC(bp);
6858                         u32 swap_en;
6859                         u32 swap_val;
6860
6861                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6862
6863                         /* try to unload UNDI on port 0 */
6864                         bp->func = 0;
6865                         bp->fw_seq =
6866                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6867                                 DRV_MSG_SEQ_NUMBER_MASK);
6868                         reset_code = bnx2x_fw_command(bp, reset_code);
6869
6870                         /* if UNDI is loaded on the other port */
6871                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6872
6873                                 /* send "DONE" for previous unload */
6874                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6875
6876                                 /* unload UNDI on port 1 */
6877                                 bp->func = 1;
6878                                 bp->fw_seq =
6879                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6880                                         DRV_MSG_SEQ_NUMBER_MASK);
6881                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6882
6883                                 bnx2x_fw_command(bp, reset_code);
6884                         }
6885
6886                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6887                                     HC_REG_CONFIG_0), 0x1000);
6888
6889                         /* close input traffic and wait for it */
6890                         /* Do not rcv packets to BRB */
6891                         REG_WR(bp,
6892                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6893                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6894                         /* Do not direct received packets that are not
6895                          * meant for the MCP to the BRB */
6896                         REG_WR(bp,
6897                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6898                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6899                         /* clear AEU */
6900                         REG_WR(bp,
6901                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6902                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6903                         msleep(10);
6904
6905                         /* save NIG port swap info */
6906                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6907                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6908                         /* reset device */
6909                         REG_WR(bp,
6910                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6911                                0xd3ffffff);
6912                         REG_WR(bp,
6913                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6914                                0x1403);
6915                         /* take the NIG out of reset and restore swap values */
6916                         REG_WR(bp,
6917                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6918                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6919                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6920                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6921
6922                         /* send unload done to the MCP */
6923                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6924
6925                         /* restore our func and fw_seq */
6926                         bp->func = func;
6927                         bp->fw_seq =
6928                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6929                                 DRV_MSG_SEQ_NUMBER_MASK);
6930                 }
6931         }
6932 }
6933
6934 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6935 {
6936         u32 val, val2, val3, val4, id;
6937         u16 pmc;
6938
6939         /* Get the chip revision id and number. */
6940         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6941         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6942         id = ((val & 0xffff) << 16);
6943         val = REG_RD(bp, MISC_REG_CHIP_REV);
6944         id |= ((val & 0xf) << 12);
6945         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6946         id |= ((val & 0xff) << 4);
6947         val = REG_RD(bp, MISC_REG_BOND_ID);
6948         id |= (val & 0xf);
6949         bp->common.chip_id = id;
6950         bp->link_params.chip_id = bp->common.chip_id;
6951         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
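        /* Worked example (illustration only): hypothetical raw reads
         * CHIP_NUM=0x164e, CHIP_REV=0x0, METAL=0x00, BOND_ID=0x0 compose to
         * id = (0x164e << 16) | (0x0 << 12) | (0x00 << 4) | 0x0 = 0x164e0000.
         */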
6952
6953         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6954         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6955                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6956         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6957                        bp->common.flash_size, bp->common.flash_size);
6958
6959         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6960         bp->link_params.shmem_base = bp->common.shmem_base;
6961         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6962
6963         if (!bp->common.shmem_base ||
6964             (bp->common.shmem_base < 0xA0000) ||
6965             (bp->common.shmem_base >= 0xC0000)) {
6966                 BNX2X_DEV_INFO("MCP not active\n");
6967                 bp->flags |= NO_MCP_FLAG;
6968                 return;
6969         }
6970
6971         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6972         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6973                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6974                 BNX2X_ERR("BAD MCP validity signature\n");
6975
6976         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6977         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6978
6979         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6980                        bp->common.hw_config, bp->common.board);
6981
6982         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6983                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6984                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6985
6986         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6987         bp->common.bc_ver = val;
6988         BNX2X_DEV_INFO("bc_ver %X\n", val);
6989         if (val < BNX2X_BC_VER) {
6990                 /* for now only warn;
6991                  * later we might need to enforce this */
6992                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6993                           " please upgrade BC\n", BNX2X_BC_VER, val);
6994         }
6995
6996         if (BP_E1HVN(bp) == 0) {
6997                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6998                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6999         } else {
7000                 /* no WOL capability for E1HVN != 0 */
7001                 bp->flags |= NO_WOL_FLAG;
7002         }
7003         BNX2X_DEV_INFO("%sWoL capable\n",
7004                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7005
7006         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7007         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7008         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7009         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7010
7011         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7012                val, val2, val3, val4);
7013 }
7014
7015 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7016                                                     u32 switch_cfg)
7017 {
7018         int port = BP_PORT(bp);
7019         u32 ext_phy_type;
7020
7021         switch (switch_cfg) {
7022         case SWITCH_CFG_1G:
7023                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7024
7025                 ext_phy_type =
7026                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7027                 switch (ext_phy_type) {
7028                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7029                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7030                                        ext_phy_type);
7031
7032                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7033                                                SUPPORTED_10baseT_Full |
7034                                                SUPPORTED_100baseT_Half |
7035                                                SUPPORTED_100baseT_Full |
7036                                                SUPPORTED_1000baseT_Full |
7037                                                SUPPORTED_2500baseX_Full |
7038                                                SUPPORTED_TP |
7039                                                SUPPORTED_FIBRE |
7040                                                SUPPORTED_Autoneg |
7041                                                SUPPORTED_Pause |
7042                                                SUPPORTED_Asym_Pause);
7043                         break;
7044
7045                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7046                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7047                                        ext_phy_type);
7048
7049                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7050                                                SUPPORTED_10baseT_Full |
7051                                                SUPPORTED_100baseT_Half |
7052                                                SUPPORTED_100baseT_Full |
7053                                                SUPPORTED_1000baseT_Full |
7054                                                SUPPORTED_TP |
7055                                                SUPPORTED_FIBRE |
7056                                                SUPPORTED_Autoneg |
7057                                                SUPPORTED_Pause |
7058                                                SUPPORTED_Asym_Pause);
7059                         break;
7060
7061                 default:
7062                         BNX2X_ERR("NVRAM config error. "
7063                                   "BAD SerDes ext_phy_config 0x%x\n",
7064                                   bp->link_params.ext_phy_config);
7065                         return;
7066                 }
7067
7068                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7069                                            port*0x10);
7070                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7071                 break;
7072
7073         case SWITCH_CFG_10G:
7074                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7075
7076                 ext_phy_type =
7077                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7078                 switch (ext_phy_type) {
7079                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7080                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7081                                        ext_phy_type);
7082
7083                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7084                                                SUPPORTED_10baseT_Full |
7085                                                SUPPORTED_100baseT_Half |
7086                                                SUPPORTED_100baseT_Full |
7087                                                SUPPORTED_1000baseT_Full |
7088                                                SUPPORTED_2500baseX_Full |
7089                                                SUPPORTED_10000baseT_Full |
7090                                                SUPPORTED_TP |
7091                                                SUPPORTED_FIBRE |
7092                                                SUPPORTED_Autoneg |
7093                                                SUPPORTED_Pause |
7094                                                SUPPORTED_Asym_Pause);
7095                         break;
7096
7097                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7098                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7099                                        ext_phy_type);
7100
7101                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7102                                                SUPPORTED_FIBRE |
7103                                                SUPPORTED_Pause |
7104                                                SUPPORTED_Asym_Pause);
7105                         break;
7106
7107                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7108                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7109                                        ext_phy_type);
7110
7111                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7112                                                SUPPORTED_1000baseT_Full |
7113                                                SUPPORTED_FIBRE |
7114                                                SUPPORTED_Pause |
7115                                                SUPPORTED_Asym_Pause);
7116                         break;
7117
7118                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7119                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7120                                        ext_phy_type);
7121
7122                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7123                                                SUPPORTED_1000baseT_Full |
7124                                                SUPPORTED_FIBRE |
7125                                                SUPPORTED_Autoneg |
7126                                                SUPPORTED_Pause |
7127                                                SUPPORTED_Asym_Pause);
7128                         break;
7129
7130                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7131                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7132                                        ext_phy_type);
7133
7134                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7135                                                SUPPORTED_2500baseX_Full |
7136                                                SUPPORTED_1000baseT_Full |
7137                                                SUPPORTED_FIBRE |
7138                                                SUPPORTED_Autoneg |
7139                                                SUPPORTED_Pause |
7140                                                SUPPORTED_Asym_Pause);
7141                         break;
7142
7143                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7144                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7145                                        ext_phy_type);
7146
7147                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7148                                                SUPPORTED_TP |
7149                                                SUPPORTED_Autoneg |
7150                                                SUPPORTED_Pause |
7151                                                SUPPORTED_Asym_Pause);
7152                         break;
7153
7154                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7155                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7156                                   bp->link_params.ext_phy_config);
7157                         break;
7158
7159                 default:
7160                         BNX2X_ERR("NVRAM config error. "
7161                                   "BAD XGXS ext_phy_config 0x%x\n",
7162                                   bp->link_params.ext_phy_config);
7163                         return;
7164                 }
7165
7166                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7167                                            port*0x18);
7168                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7169
7170                 break;
7171
7172         default:
7173                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7174                           bp->port.link_config);
7175                 return;
7176         }
7177         bp->link_params.phy_addr = bp->port.phy_addr;
7178
7179         /* mask what we support according to speed_cap_mask */
7180         if (!(bp->link_params.speed_cap_mask &
7181                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7182                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7183
7184         if (!(bp->link_params.speed_cap_mask &
7185                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7186                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7187
7188         if (!(bp->link_params.speed_cap_mask &
7189                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7190                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7191
7192         if (!(bp->link_params.speed_cap_mask &
7193                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7194                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7195
7196         if (!(bp->link_params.speed_cap_mask &
7197                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7198                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7199                                         SUPPORTED_1000baseT_Full);
7200
7201         if (!(bp->link_params.speed_cap_mask &
7202                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7203                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7204
7205         if (!(bp->link_params.speed_cap_mask &
7206                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7207                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7208
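        /* E.g. (illustration only): a speed_cap_mask with only the D0_1G and
         * D0_10G bits set leaves just the 1G/10G speed bits (plus the
         * media/pause bits) in bp->port.supported after the masking above,
         * which is what gets printed below.
         */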
7209         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7210 }
7211
7212 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7213 {
7214         bp->link_params.req_duplex = DUPLEX_FULL;
7215
7216         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7217         case PORT_FEATURE_LINK_SPEED_AUTO:
7218                 if (bp->port.supported & SUPPORTED_Autoneg) {
7219                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7220                         bp->port.advertising = bp->port.supported;
7221                 } else {
7222                         u32 ext_phy_type =
7223                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7224
7225                         if ((ext_phy_type ==
7226                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7227                             (ext_phy_type ==
7228                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7229                                 /* force 10G, no AN */
7230                                 bp->link_params.req_line_speed = SPEED_10000;
7231                                 bp->port.advertising =
7232                                                 (ADVERTISED_10000baseT_Full |
7233                                                  ADVERTISED_FIBRE);
7234                                 break;
7235                         }
7236                         BNX2X_ERR("NVRAM config error. "
7237                                   "Invalid link_config 0x%x"
7238                                   "  Autoneg not supported\n",
7239                                   bp->port.link_config);
7240                         return;
7241                 }
7242                 break;
7243
7244         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7245                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7246                         bp->link_params.req_line_speed = SPEED_10;
7247                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7248                                                 ADVERTISED_TP);
7249                 } else {
7250                         BNX2X_ERR("NVRAM config error. "
7251                                   "Invalid link_config 0x%x"
7252                                   "  speed_cap_mask 0x%x\n",
7253                                   bp->port.link_config,
7254                                   bp->link_params.speed_cap_mask);
7255                         return;
7256                 }
7257                 break;
7258
7259         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7260                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7261                         bp->link_params.req_line_speed = SPEED_10;
7262                         bp->link_params.req_duplex = DUPLEX_HALF;
7263                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7264                                                 ADVERTISED_TP);
7265                 } else {
7266                         BNX2X_ERR("NVRAM config error. "
7267                                   "Invalid link_config 0x%x"
7268                                   "  speed_cap_mask 0x%x\n",
7269                                   bp->port.link_config,
7270                                   bp->link_params.speed_cap_mask);
7271                         return;
7272                 }
7273                 break;
7274
7275         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7276                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7277                         bp->link_params.req_line_speed = SPEED_100;
7278                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7279                                                 ADVERTISED_TP);
7280                 } else {
7281                         BNX2X_ERR("NVRAM config error. "
7282                                   "Invalid link_config 0x%x"
7283                                   "  speed_cap_mask 0x%x\n",
7284                                   bp->port.link_config,
7285                                   bp->link_params.speed_cap_mask);
7286                         return;
7287                 }
7288                 break;
7289
7290         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7291                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7292                         bp->link_params.req_line_speed = SPEED_100;
7293                         bp->link_params.req_duplex = DUPLEX_HALF;
7294                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7295                                                 ADVERTISED_TP);
7296                 } else {
7297                         BNX2X_ERR("NVRAM config error. "
7298                                   "Invalid link_config 0x%x"
7299                                   "  speed_cap_mask 0x%x\n",
7300                                   bp->port.link_config,
7301                                   bp->link_params.speed_cap_mask);
7302                         return;
7303                 }
7304                 break;
7305
7306         case PORT_FEATURE_LINK_SPEED_1G:
7307                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7308                         bp->link_params.req_line_speed = SPEED_1000;
7309                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7310                                                 ADVERTISED_TP);
7311                 } else {
7312                         BNX2X_ERR("NVRAM config error. "
7313                                   "Invalid link_config 0x%x"
7314                                   "  speed_cap_mask 0x%x\n",
7315                                   bp->port.link_config,
7316                                   bp->link_params.speed_cap_mask);
7317                         return;
7318                 }
7319                 break;
7320
7321         case PORT_FEATURE_LINK_SPEED_2_5G:
7322                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7323                         bp->link_params.req_line_speed = SPEED_2500;
7324                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7325                                                 ADVERTISED_TP);
7326                 } else {
7327                         BNX2X_ERR("NVRAM config error. "
7328                                   "Invalid link_config 0x%x"
7329                                   "  speed_cap_mask 0x%x\n",
7330                                   bp->port.link_config,
7331                                   bp->link_params.speed_cap_mask);
7332                         return;
7333                 }
7334                 break;
7335
7336         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7337         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7338         case PORT_FEATURE_LINK_SPEED_10G_KR:
7339                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7340                         bp->link_params.req_line_speed = SPEED_10000;
7341                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7342                                                 ADVERTISED_FIBRE);
7343                 } else {
7344                         BNX2X_ERR("NVRAM config error. "
7345                                   "Invalid link_config 0x%x"
7346                                   "  speed_cap_mask 0x%x\n",
7347                                   bp->port.link_config,
7348                                   bp->link_params.speed_cap_mask);
7349                         return;
7350                 }
7351                 break;
7352
7353         default:
7354                 BNX2X_ERR("NVRAM config error. "
7355                           "BAD link speed link_config 0x%x\n",
7356                           bp->port.link_config);
7357                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7358                 bp->port.advertising = bp->port.supported;
7359                 break;
7360         }
7361
7362         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7363                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7364         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7365             !(bp->port.supported & SUPPORTED_Autoneg))
7366                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7367
7368         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7369                        "  advertising 0x%x\n",
7370                        bp->link_params.req_line_speed,
7371                        bp->link_params.req_duplex,
7372                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7373 }
7374
7375 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7376 {
7377         int port = BP_PORT(bp);
7378         u32 val, val2;
7379
7380         bp->link_params.bp = bp;
7381         bp->link_params.port = port;
7382
7383         bp->link_params.serdes_config =
7384                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7385         bp->link_params.lane_config =
7386                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7387         bp->link_params.ext_phy_config =
7388                 SHMEM_RD(bp,
7389                          dev_info.port_hw_config[port].external_phy_config);
7390         bp->link_params.speed_cap_mask =
7391                 SHMEM_RD(bp,
7392                          dev_info.port_hw_config[port].speed_capability_mask);
7393
7394         bp->port.link_config =
7395                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7396
7397         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7398              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7399                        "  link_config 0x%08x\n",
7400                        bp->link_params.serdes_config,
7401                        bp->link_params.lane_config,
7402                        bp->link_params.ext_phy_config,
7403                        bp->link_params.speed_cap_mask, bp->port.link_config);
7404
7405         bp->link_params.switch_cfg = (bp->port.link_config &
7406                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7407         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7408
7409         bnx2x_link_settings_requested(bp);
7410
7411         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7412         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7413         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7414         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7415         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7416         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7417         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7418         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7419         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7420         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
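        /* Layout note (illustration only): mac_upper carries MAC bytes 0-1 in
         * its low 16 bits and mac_lower bytes 2-5, so hypothetical values
         * upper=0x00000011, lower=0x22334455 yield 00:11:22:33:44:55 above.
         */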
7421 }
7422
7423 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7424 {
7425         int func = BP_FUNC(bp);
7426         u32 val, val2;
7427         int rc = 0;
7428
7429         bnx2x_get_common_hwinfo(bp);
7430
7431         bp->e1hov = 0;
7432         bp->e1hmf = 0;
7433         if (CHIP_IS_E1H(bp)) {
7434                 bp->mf_config =
7435                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7436
7437                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7438                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7439                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7440
7441                         bp->e1hov = val;
7442                         bp->e1hmf = 1;
7443                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7444                                        "(0x%04x)\n",
7445                                        func, bp->e1hov, bp->e1hov);
7446                 } else {
7447                         BNX2X_DEV_INFO("Single function mode\n");
7448                         if (BP_E1HVN(bp)) {
7449                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7450                                           "  aborting\n", func);
7451                                 rc = -EPERM;
7452                         }
7453                 }
7454         }
7455
7456         if (!BP_NOMCP(bp)) {
7457                 bnx2x_get_port_hwinfo(bp);
7458
7459                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7460                               DRV_MSG_SEQ_NUMBER_MASK);
7461                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7462         }
7463
7464         if (IS_E1HMF(bp)) {
7465                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7466                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7467                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7468                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7469                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7470                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7471                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7472                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7473                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7474                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7475                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7476                                ETH_ALEN);
7477                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7478                                ETH_ALEN);
7479                 }
7480
7481                 return rc;
7482         }
7483
7484         if (BP_NOMCP(bp)) {
7485                 /* only supposed to happen on emulation/FPGA */
7486                 BNX2X_ERR("warning: random MAC workaround active\n");
7487                 random_ether_addr(bp->dev->dev_addr);
7488                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7489         }
7490
7491         return rc;
7492 }
7493
7494 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7495 {
7496         int func = BP_FUNC(bp);
7497         int rc;
7498
7499         /* Disable interrupt handling until HW is initialized */
7500         atomic_set(&bp->intr_sem, 1);
7501
7502         mutex_init(&bp->port.phy_mutex);
7503
7504         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7505         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7506
7507         rc = bnx2x_get_hwinfo(bp);
7508
7509         /* need to reset chip if undi was active */
7510         if (!BP_NOMCP(bp))
7511                 bnx2x_undi_unload(bp);
7512
7513         if (CHIP_REV_IS_FPGA(bp))
7514                 printk(KERN_ERR PFX "FPGA detected\n");
7515
7516         if (BP_NOMCP(bp) && (func == 0))
7517                 printk(KERN_ERR PFX
7518                        "MCP disabled, must load devices in order!\n");
7519
7520         /* Set TPA flags */
7521         if (disable_tpa) {
7522                 bp->flags &= ~TPA_ENABLE_FLAG;
7523                 bp->dev->features &= ~NETIF_F_LRO;
7524         } else {
7525                 bp->flags |= TPA_ENABLE_FLAG;
7526                 bp->dev->features |= NETIF_F_LRO;
7527         }
7528
7529
7530         bp->tx_ring_size = MAX_TX_AVAIL;
7531         bp->rx_ring_size = MAX_RX_AVAIL;
7532
7533         bp->rx_csum = 1;
7534         bp->rx_offset = 0;
7535
7536         bp->tx_ticks = 50;
7537         bp->rx_ticks = 25;
7538
7539         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7540         bp->current_interval = (poll ? poll : bp->timer_interval);
7541
7542         init_timer(&bp->timer);
7543         bp->timer.expires = jiffies + bp->current_interval;
7544         bp->timer.data = (unsigned long) bp;
7545         bp->timer.function = bnx2x_timer;
7546
7547         return rc;
7548 }
7549
7550 /*
7551  * ethtool service functions
7552  */
7553
7554 /* All ethtool functions called with rtnl_lock */
7555
7556 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7557 {
7558         struct bnx2x *bp = netdev_priv(dev);
7559
7560         cmd->supported = bp->port.supported;
7561         cmd->advertising = bp->port.advertising;
7562
7563         if (netif_carrier_ok(dev)) {
7564                 cmd->speed = bp->link_vars.line_speed;
7565                 cmd->duplex = bp->link_vars.duplex;
7566         } else {
7567                 cmd->speed = bp->link_params.req_line_speed;
7568                 cmd->duplex = bp->link_params.req_duplex;
7569         }
7570         if (IS_E1HMF(bp)) {
7571                 u16 vn_max_rate;
7572
7573                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7574                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7575                 if (vn_max_rate < cmd->speed)
7576                         cmd->speed = vn_max_rate;
7577         }
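        /* The MF max-BW field is in 100 Mbps units, so e.g. (illustration
         * only) a field value of 25 caps the reported speed at 2500.
         */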
7578
7579         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7580                 u32 ext_phy_type =
7581                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7582
7583                 switch (ext_phy_type) {
7584                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7585                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7586                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7587                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7588                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7589                         cmd->port = PORT_FIBRE;
7590                         break;
7591
7592                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7593                         cmd->port = PORT_TP;
7594                         break;
7595
7596                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7597                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7598                                   bp->link_params.ext_phy_config);
7599                         break;
7600
7601                 default:
7602                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7603                            bp->link_params.ext_phy_config);
7604                         break;
7605                 }
7606         } else
7607                 cmd->port = PORT_TP;
7608
7609         cmd->phy_address = bp->port.phy_addr;
7610         cmd->transceiver = XCVR_INTERNAL;
7611
7612         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7613                 cmd->autoneg = AUTONEG_ENABLE;
7614         else
7615                 cmd->autoneg = AUTONEG_DISABLE;
7616
7617         cmd->maxtxpkt = 0;
7618         cmd->maxrxpkt = 0;
7619
7620         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7621            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7622            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7623            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7624            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7625            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7626            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7627
7628         return 0;
7629 }
7630
7631 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7632 {
7633         struct bnx2x *bp = netdev_priv(dev);
7634         u32 advertising;
7635
7636         if (IS_E1HMF(bp))
7637                 return 0;
7638
7639         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7640            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7641            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7642            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7643            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7644            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7645            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7646
7647         if (cmd->autoneg == AUTONEG_ENABLE) {
7648                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7649                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7650                         return -EINVAL;
7651                 }
7652
7653                 /* advertise the requested speed and duplex if supported */
7654                 cmd->advertising &= bp->port.supported;
7655
7656                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7657                 bp->link_params.req_duplex = DUPLEX_FULL;
7658                 bp->port.advertising |= (ADVERTISED_Autoneg |
7659                                          cmd->advertising);
7660
7661         } else { /* forced speed */
7662                 /* advertise the requested speed and duplex if supported */
7663                 switch (cmd->speed) {
7664                 case SPEED_10:
7665                         if (cmd->duplex == DUPLEX_FULL) {
7666                                 if (!(bp->port.supported &
7667                                       SUPPORTED_10baseT_Full)) {
7668                                         DP(NETIF_MSG_LINK,
7669                                            "10M full not supported\n");
7670                                         return -EINVAL;
7671                                 }
7672
7673                                 advertising = (ADVERTISED_10baseT_Full |
7674                                                ADVERTISED_TP);
7675                         } else {
7676                                 if (!(bp->port.supported &
7677                                       SUPPORTED_10baseT_Half)) {
7678                                         DP(NETIF_MSG_LINK,
7679                                            "10M half not supported\n");
7680                                         return -EINVAL;
7681                                 }
7682
7683                                 advertising = (ADVERTISED_10baseT_Half |
7684                                                ADVERTISED_TP);
7685                         }
7686                         break;
7687
7688                 case SPEED_100:
7689                         if (cmd->duplex == DUPLEX_FULL) {
7690                                 if (!(bp->port.supported &
7691                                                 SUPPORTED_100baseT_Full)) {
7692                                         DP(NETIF_MSG_LINK,
7693                                            "100M full not supported\n");
7694                                         return -EINVAL;
7695                                 }
7696
7697                                 advertising = (ADVERTISED_100baseT_Full |
7698                                                ADVERTISED_TP);
7699                         } else {
7700                                 if (!(bp->port.supported &
7701                                                 SUPPORTED_100baseT_Half)) {
7702                                         DP(NETIF_MSG_LINK,
7703                                            "100M half not supported\n");
7704                                         return -EINVAL;
7705                                 }
7706
7707                                 advertising = (ADVERTISED_100baseT_Half |
7708                                                ADVERTISED_TP);
7709                         }
7710                         break;
7711
7712                 case SPEED_1000:
7713                         if (cmd->duplex != DUPLEX_FULL) {
7714                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7715                                 return -EINVAL;
7716                         }
7717
7718                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7719                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7720                                 return -EINVAL;
7721                         }
7722
7723                         advertising = (ADVERTISED_1000baseT_Full |
7724                                        ADVERTISED_TP);
7725                         break;
7726
7727                 case SPEED_2500:
7728                         if (cmd->duplex != DUPLEX_FULL) {
7729                                 DP(NETIF_MSG_LINK,
7730                                    "2.5G half not supported\n");
7731                                 return -EINVAL;
7732                         }
7733
7734                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7735                                 DP(NETIF_MSG_LINK,
7736                                    "2.5G full not supported\n");
7737                                 return -EINVAL;
7738                         }
7739
7740                         advertising = (ADVERTISED_2500baseX_Full |
7741                                        ADVERTISED_TP);
7742                         break;
7743
7744                 case SPEED_10000:
7745                         if (cmd->duplex != DUPLEX_FULL) {
7746                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7747                                 return -EINVAL;
7748                         }
7749
7750                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7751                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7752                                 return -EINVAL;
7753                         }
7754
7755                         advertising = (ADVERTISED_10000baseT_Full |
7756                                        ADVERTISED_FIBRE);
7757                         break;
7758
7759                 default:
7760                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7761                         return -EINVAL;
7762                 }
7763
7764                 bp->link_params.req_line_speed = cmd->speed;
7765                 bp->link_params.req_duplex = cmd->duplex;
7766                 bp->port.advertising = advertising;
7767         }
7768
7769         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7770            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7771            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7772            bp->port.advertising);
7773
7774         if (netif_running(dev)) {
7775                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7776                 bnx2x_link_set(bp);
7777         }
7778
7779         return 0;
7780 }
7781
7782 #define PHY_FW_VER_LEN                  10
7783
7784 static void bnx2x_get_drvinfo(struct net_device *dev,
7785                               struct ethtool_drvinfo *info)
7786 {
7787         struct bnx2x *bp = netdev_priv(dev);
7788         u8 phy_fw_ver[PHY_FW_VER_LEN];
7789
7790         strcpy(info->driver, DRV_MODULE_NAME);
7791         strcpy(info->version, DRV_MODULE_VERSION);
7792
7793         phy_fw_ver[0] = '\0';
7794         if (bp->port.pmf) {
7795                 bnx2x_acquire_phy_lock(bp);
7796                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7797                                              (bp->state != BNX2X_STATE_CLOSED),
7798                                              phy_fw_ver, PHY_FW_VER_LEN);
7799                 bnx2x_release_phy_lock(bp);
7800         }
7801
7802         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7803                  (bp->common.bc_ver & 0xff0000) >> 16,
7804                  (bp->common.bc_ver & 0xff00) >> 8,
7805                  (bp->common.bc_ver & 0xff),
7806                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7807         strcpy(info->bus_info, pci_name(bp->pdev));
7808         info->n_stats = BNX2X_NUM_STATS;
7809         info->testinfo_len = BNX2X_NUM_TESTS;
7810         info->eedump_len = bp->common.flash_size;
7811         info->regdump_len = 0;
7812 }
7813
7814 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7815 {
7816         struct bnx2x *bp = netdev_priv(dev);
7817
7818         if (bp->flags & NO_WOL_FLAG) {
7819                 wol->supported = 0;
7820                 wol->wolopts = 0;
7821         } else {
7822                 wol->supported = WAKE_MAGIC;
7823                 if (bp->wol)
7824                         wol->wolopts = WAKE_MAGIC;
7825                 else
7826                         wol->wolopts = 0;
7827         }
7828         memset(&wol->sopass, 0, sizeof(wol->sopass));
7829 }
7830
7831 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7832 {
7833         struct bnx2x *bp = netdev_priv(dev);
7834
7835         if (wol->wolopts & ~WAKE_MAGIC)
7836                 return -EINVAL;
7837
7838         if (wol->wolopts & WAKE_MAGIC) {
7839                 if (bp->flags & NO_WOL_FLAG)
7840                         return -EINVAL;
7841
7842                 bp->wol = 1;
7843         } else
7844                 bp->wol = 0;
7845
7846         return 0;
7847 }
7848
7849 static u32 bnx2x_get_msglevel(struct net_device *dev)
7850 {
7851         struct bnx2x *bp = netdev_priv(dev);
7852
7853         return bp->msglevel;
7854 }
7855
7856 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7857 {
7858         struct bnx2x *bp = netdev_priv(dev);
7859
7860         if (capable(CAP_NET_ADMIN))
7861                 bp->msglevel = level;
7862 }
7863
7864 static int bnx2x_nway_reset(struct net_device *dev)
7865 {
7866         struct bnx2x *bp = netdev_priv(dev);
7867
7868         if (!bp->port.pmf)
7869                 return 0;
7870
7871         if (netif_running(dev)) {
7872                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7873                 bnx2x_link_set(bp);
7874         }
7875
7876         return 0;
7877 }
7878
7879 static int bnx2x_get_eeprom_len(struct net_device *dev)
7880 {
7881         struct bnx2x *bp = netdev_priv(dev);
7882
7883         return bp->common.flash_size;
7884 }
7885
7886 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7887 {
7888         int port = BP_PORT(bp);
7889         int count, i;
7890         u32 val = 0;
7891
7892         /* adjust timeout for emulation/FPGA */
7893         count = NVRAM_TIMEOUT_COUNT;
7894         if (CHIP_REV_IS_SLOW(bp))
7895                 count *= 100;
7896
7897         /* request access to nvram interface */
7898         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7899                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7900
7901         for (i = 0; i < count*10; i++) {
7902                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7903                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7904                         break;
7905
7906                 udelay(5);
7907         }
7908
7909         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7910                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7911                 return -EBUSY;
7912         }
7913
7914         return 0;
7915 }
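/* The request/poll pattern above waits up to NVRAM_TIMEOUT_COUNT * 10
 * iterations of 5 us (scaled x100 on emulation/FPGA) for the per-port
 * arbitration grant bit; bnx2x_release_nvram_lock() below mirrors it with
 * the REQ_CLR1 bit.
 */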
7916
7917 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7918 {
7919         int port = BP_PORT(bp);
7920         int count, i;
7921         u32 val = 0;
7922
7923         /* adjust timeout for emulation/FPGA */
7924         count = NVRAM_TIMEOUT_COUNT;
7925         if (CHIP_REV_IS_SLOW(bp))
7926                 count *= 100;
7927
7928         /* relinquish nvram interface */
7929         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7930                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7931
7932         for (i = 0; i < count*10; i++) {
7933                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7934                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7935                         break;
7936
7937                 udelay(5);
7938         }
7939
7940         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7941                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7942                 return -EBUSY;
7943         }
7944
7945         return 0;
7946 }
7947
7948 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7949 {
7950         u32 val;
7951
7952         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7953
7954         /* enable both bits, even on read */
7955         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7956                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7957                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7958 }
7959
7960 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7961 {
7962         u32 val;
7963
7964         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7965
7966         /* disable both bits, even after read */
7967         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7968                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7969                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7970 }
7971
7972 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7973                                   u32 cmd_flags)
7974 {
7975         int count, i, rc;
7976         u32 val;
7977
7978         /* build the command word */
7979         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7980
7981         /* need to clear DONE bit separately */
7982         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7983
7984         /* address of the NVRAM to read from */
7985         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7986                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7987
7988         /* issue a read command */
7989         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7990
7991         /* adjust timeout for emulation/FPGA */
7992         count = NVRAM_TIMEOUT_COUNT;
7993         if (CHIP_REV_IS_SLOW(bp))
7994                 count *= 100;
7995
7996         /* wait for completion */
7997         *ret_val = 0;
7998         rc = -EBUSY;
7999         for (i = 0; i < count; i++) {
8000                 udelay(5);
8001                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8002
8003                 if (val & MCPR_NVM_COMMAND_DONE) {
8004                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8005                         /* we read nvram data in cpu order,
8006                          * but ethtool sees it as an array of bytes;
8007                          * converting to big-endian will do the work */
8008                         val = cpu_to_be32(val);
8009                         *ret_val = val;
8010                         rc = 0;
8011                         break;
8012                 }
8013         }
8014
8015         return rc;
8016 }
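/* Endianness note: cpu_to_be32() above is a byte swap on little-endian hosts
 * and a no-op on big-endian ones, so the dword always lands in memory in the
 * same byte order - the flat byte array ethtool user space expects.
 */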
8017
8018 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8019                             int buf_size)
8020 {
8021         int rc;
8022         u32 cmd_flags;
8023         u32 val;
8024
8025         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8026                 DP(BNX2X_MSG_NVM,
8027                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8028                    offset, buf_size);
8029                 return -EINVAL;
8030         }
8031
8032         if (offset + buf_size > bp->common.flash_size) {
8033                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8034                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8035                    offset, buf_size, bp->common.flash_size);
8036                 return -EINVAL;
8037         }
8038
8039         /* request access to nvram interface */
8040         rc = bnx2x_acquire_nvram_lock(bp);
8041         if (rc)
8042                 return rc;
8043
8044         /* enable access to nvram interface */
8045         bnx2x_enable_nvram_access(bp);
8046
8047         /* read the first word(s) */
8048         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8049         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8050                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8051                 memcpy(ret_buf, &val, 4);
8052
8053                 /* advance to the next dword */
8054                 offset += sizeof(u32);
8055                 ret_buf += sizeof(u32);
8056                 buf_size -= sizeof(u32);
8057                 cmd_flags = 0;
8058         }
8059
8060         if (rc == 0) {
8061                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8062                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8063                 memcpy(ret_buf, &val, 4);
8064         }
8065
8066         /* disable access to nvram interface */
8067         bnx2x_disable_nvram_access(bp);
8068         bnx2x_release_nvram_lock(bp);
8069
8070         return rc;
8071 }
8072
8073 static int bnx2x_get_eeprom(struct net_device *dev,
8074                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8075 {
8076         struct bnx2x *bp = netdev_priv(dev);
8077         int rc;
8078
8079         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8080            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8081            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8082            eeprom->len, eeprom->len);
8083
8084         /* parameters already validated in ethtool_get_eeprom */
8085
8086         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8087
8088         return rc;
8089 }
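
/* Illustrative user-space entry point for the handler above:
 *	ethtool -e eth0 offset 0 length 64
 * The ethtool core checks offset/length against get_eeprom_len() before
 * this handler ever runs, hence the lack of validation here.
 */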
8090
8091 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8092                                    u32 cmd_flags)
8093 {
8094         int count, i, rc;
8095
8096         /* build the command word */
8097         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8098
8099         /* need to clear DONE bit separately */
8100         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8101
8102         /* write the data */
8103         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8104
8105         /* address of the NVRAM to write to */
8106         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8107                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8108
8109         /* issue the write command */
8110         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8111
8112         /* adjust timeout for emulation/FPGA */
8113         count = NVRAM_TIMEOUT_COUNT;
8114         if (CHIP_REV_IS_SLOW(bp))
8115                 count *= 100;
8116
8117         /* wait for completion */
8118         rc = -EBUSY;
8119         for (i = 0; i < count; i++) {
8120                 udelay(5);
8121                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8122                 if (val & MCPR_NVM_COMMAND_DONE) {
8123                         rc = 0;
8124                         break;
8125                 }
8126         }
8127
8128         return rc;
8129 }
8130
8131 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8132
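/* A worked instance of the macro above: BYTE_OFFSET(0x451) is
 * 8 * (0x451 & 3) = 8, so bnx2x_nvram_write1() below read-modify-writes
 * the aligned dword at 0x450, clearing bits 15:8 and merging the new
 * byte into that lane.
 */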
8133 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8134                               int buf_size)
8135 {
8136         int rc;
8137         u32 cmd_flags;
8138         u32 align_offset;
8139         u32 val;
8140
8141         if (offset + buf_size > bp->common.flash_size) {
8142                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8143                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8144                    offset, buf_size, bp->common.flash_size);
8145                 return -EINVAL;
8146         }
8147
8148         /* request access to nvram interface */
8149         rc = bnx2x_acquire_nvram_lock(bp);
8150         if (rc)
8151                 return rc;
8152
8153         /* enable access to nvram interface */
8154         bnx2x_enable_nvram_access(bp);
8155
8156         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8157         align_offset = (offset & ~0x03);
8158         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8159
8160         if (rc == 0) {
8161                 val &= ~(0xff << BYTE_OFFSET(offset));
8162                 val |= (*data_buf << BYTE_OFFSET(offset));
8163
8164                 /* nvram data is returned as an array of bytes
8165                  * convert it back to cpu order */
8166                 val = be32_to_cpu(val);
8167
8168                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8169                                              cmd_flags);
8170         }
8171
8172         /* disable access to nvram interface */
8173         bnx2x_disable_nvram_access(bp);
8174         bnx2x_release_nvram_lock(bp);
8175
8176         return rc;
8177 }
8178
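/* A sketch of the paging logic in bnx2x_nvram_write() below, assuming
 * NVRAM_PAGE_SIZE is 256 as in the driver headers: writing 16 bytes at
 * offset 0xf8 is split into one bracketed burst per flash page -
 *
 *	FIRST @ 0xf8,  LAST @ 0xfc	(page 0 ends at 0xff)
 *	FIRST @ 0x100, LAST @ 0x104	(page 1)
 */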
8179 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8180                              int buf_size)
8181 {
8182         int rc;
8183         u32 cmd_flags;
8184         u32 val;
8185         u32 written_so_far;
8186
8187         if (buf_size == 1)      /* ethtool */
8188                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8189
8190         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8191                 DP(BNX2X_MSG_NVM,
8192                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8193                    offset, buf_size);
8194                 return -EINVAL;
8195         }
8196
8197         if (offset + buf_size > bp->common.flash_size) {
8198                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8199                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8200                    offset, buf_size, bp->common.flash_size);
8201                 return -EINVAL;
8202         }
8203
8204         /* request access to nvram interface */
8205         rc = bnx2x_acquire_nvram_lock(bp);
8206         if (rc)
8207                 return rc;
8208
8209         /* enable access to nvram interface */
8210         bnx2x_enable_nvram_access(bp);
8211
8212         written_so_far = 0;
8213         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8214         while ((written_so_far < buf_size) && (rc == 0)) {
8215                 if (written_so_far == (buf_size - sizeof(u32)))
8216                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8217                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8218                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8219                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8220                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8221
8222                 memcpy(&val, data_buf, 4);
8223
8224                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8225
8226                 /* advance to the next dword */
8227                 offset += sizeof(u32);
8228                 data_buf += sizeof(u32);
8229                 written_so_far += sizeof(u32);
8230                 cmd_flags = 0;
8231         }
8232
8233         /* disable access to nvram interface */
8234         bnx2x_disable_nvram_access(bp);
8235         bnx2x_release_nvram_lock(bp);
8236
8237         return rc;
8238 }
8239
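/* Note on the magic below: 0x00504859 is ASCII "PHY" - such a request
 * carries PHY firmware rather than NVRAM data, and only the PMF (port
 * management function) may perform the download; everything else is a
 * plain NVRAM write.
 */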
8240 static int bnx2x_set_eeprom(struct net_device *dev,
8241                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8242 {
8243         struct bnx2x *bp = netdev_priv(dev);
8244         int rc;
8245
8246         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8247            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8248            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8249            eeprom->len, eeprom->len);
8250
8251         /* parameters already validated in ethtool_set_eeprom */
8252
8253         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8254         if (eeprom->magic == 0x00504859)
8255                 if (bp->port.pmf) {
8256
8257                         bnx2x_acquire_phy_lock(bp);
8258                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8259                                              bp->link_params.ext_phy_config,
8260                                              (bp->state != BNX2X_STATE_CLOSED),
8261                                              eebuf, eeprom->len);
8262                         if ((bp->state == BNX2X_STATE_OPEN) ||
8263                             (bp->state == BNX2X_STATE_DISABLED)) {
8264                                 rc |= bnx2x_link_reset(&bp->link_params,
8265                                                        &bp->link_vars);
8266                                 rc |= bnx2x_phy_init(&bp->link_params,
8267                                                      &bp->link_vars);
8268                         }
8269                         bnx2x_release_phy_lock(bp);
8270
8271                 } else /* Only the PMF can access the PHY */
8272                         return -EINVAL;
8273         else
8274                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8275
8276         return rc;
8277 }
8278
8279 static int bnx2x_get_coalesce(struct net_device *dev,
8280                               struct ethtool_coalesce *coal)
8281 {
8282         struct bnx2x *bp = netdev_priv(dev);
8283
8284         memset(coal, 0, sizeof(struct ethtool_coalesce));
8285
8286         coal->rx_coalesce_usecs = bp->rx_ticks;
8287         coal->tx_coalesce_usecs = bp->tx_ticks;
8288
8289         return 0;
8290 }
8291
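/* Illustrative user-space counterpart of the pair above/below:
 *	ethtool -C eth0 rx-usecs 25 tx-usecs 50
 * bnx2x_set_coalesce() clamps the values (rx to 3000, tx to 0x3000) and
 * applies them immediately if the interface is up.
 */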
8292 static int bnx2x_set_coalesce(struct net_device *dev,
8293                               struct ethtool_coalesce *coal)
8294 {
8295         struct bnx2x *bp = netdev_priv(dev);
8296
8297         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8298         if (bp->rx_ticks > 3000)
8299                 bp->rx_ticks = 3000;
8300
8301         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8302         if (bp->tx_ticks > 0x3000)
8303                 bp->tx_ticks = 0x3000;
8304
8305         if (netif_running(dev))
8306                 bnx2x_update_coalesce(bp);
8307
8308         return 0;
8309 }
8310
8311 static void bnx2x_get_ringparam(struct net_device *dev,
8312                                 struct ethtool_ringparam *ering)
8313 {
8314         struct bnx2x *bp = netdev_priv(dev);
8315
8316         ering->rx_max_pending = MAX_RX_AVAIL;
8317         ering->rx_mini_max_pending = 0;
8318         ering->rx_jumbo_max_pending = 0;
8319
8320         ering->rx_pending = bp->rx_ring_size;
8321         ering->rx_mini_pending = 0;
8322         ering->rx_jumbo_pending = 0;
8323
8324         ering->tx_max_pending = MAX_TX_AVAIL;
8325         ering->tx_pending = bp->tx_ring_size;
8326 }
8327
8328 static int bnx2x_set_ringparam(struct net_device *dev,
8329                                struct ethtool_ringparam *ering)
8330 {
8331         struct bnx2x *bp = netdev_priv(dev);
8332         int rc = 0;
8333
8334         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8335             (ering->tx_pending > MAX_TX_AVAIL) ||
8336             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8337                 return -EINVAL;
8338
8339         bp->rx_ring_size = ering->rx_pending;
8340         bp->tx_ring_size = ering->tx_pending;
8341
8342         if (netif_running(dev)) {
8343                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8344                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8345         }
8346
8347         return rc;
8348 }
8349
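/* Illustrative usage:
 *	ethtool -G eth0 rx 2048 tx 1024
 * bnx2x_set_ringparam() below rejects a tx ring that cannot hold a
 * maximally fragmented skb (tx must exceed MAX_SKB_FRAGS + 4) and
 * reloads the NIC to apply the new sizes.
 */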
8350 static void bnx2x_get_pauseparam(struct net_device *dev,
8351                                  struct ethtool_pauseparam *epause)
8352 {
8353         struct bnx2x *bp = netdev_priv(dev);
8354
8355         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8356                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8357
8358         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8359                             BNX2X_FLOW_CTRL_RX);
8360         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8361                             BNX2X_FLOW_CTRL_TX);
8362
8363         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8364            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8365            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8366 }
8367
8368 static int bnx2x_set_pauseparam(struct net_device *dev,
8369                                 struct ethtool_pauseparam *epause)
8370 {
8371         struct bnx2x *bp = netdev_priv(dev);
8372
8373         if (IS_E1HMF(bp))
8374                 return 0;
8375
8376         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8377            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8378            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8379
8380         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8381
8382         if (epause->rx_pause)
8383                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8384
8385         if (epause->tx_pause)
8386                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8387
8388         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8389                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8390
8391         if (epause->autoneg) {
8392                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8393                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8394                         return -EINVAL;
8395                 }
8396
8397                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8398                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8399         }
8400
8401         DP(NETIF_MSG_LINK,
8402            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8403
8404         if (netif_running(dev)) {
8405                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8406                 bnx2x_link_set(bp);
8407         }
8408
8409         return 0;
8410 }
8411
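/* How the request resolves in bnx2x_set_pauseparam() below (a worked
 * example): rx on, tx off, autoneg off requests RX-only pause; rx and
 * tx both off maps the untouched AUTO value to NONE; autoneg on - when
 * autoneg is supported and the line speed is set to autoneg - restores
 * AUTO so negotiation with the link partner decides.
 */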
8412 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8413 {
8414         struct bnx2x *bp = netdev_priv(dev);
8415         int changed = 0;
8416         int rc = 0;
8417
8418         /* TPA requires Rx CSUM offloading */
8419         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8420                 if (!(dev->features & NETIF_F_LRO)) {
8421                         dev->features |= NETIF_F_LRO;
8422                         bp->flags |= TPA_ENABLE_FLAG;
8423                         changed = 1;
8424                 }
8425
8426         } else if (dev->features & NETIF_F_LRO) {
8427                 dev->features &= ~NETIF_F_LRO;
8428                 bp->flags &= ~TPA_ENABLE_FLAG;
8429                 changed = 1;
8430         }
8431
8432         if (changed && netif_running(dev)) {
8433                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8434                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8435         }
8436
8437         return rc;
8438 }
8439
8440 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8441 {
8442         struct bnx2x *bp = netdev_priv(dev);
8443
8444         return bp->rx_csum;
8445 }
8446
8447 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8448 {
8449         struct bnx2x *bp = netdev_priv(dev);
8450         int rc = 0;
8451
8452         bp->rx_csum = data;
8453
8454         /* Disable TPA when Rx CSUM is disabled. Otherwise all
8455            TPA'ed packets will be discarded due to wrong TCP CSUM */
8456         if (!data) {
8457                 u32 flags = ethtool_op_get_flags(dev);
8458
8459                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8460         }
8461
8462         return rc;
8463 }
8464
8465 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8466 {
8467         if (data) {
8468                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8469                 dev->features |= NETIF_F_TSO6;
8470         } else {
8471                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8472                 dev->features &= ~NETIF_F_TSO6;
8473         }
8474
8475         return 0;
8476 }
8477
8478 static const struct {
8479         char string[ETH_GSTRING_LEN];
8480 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8481         { "register_test (offline)" },
8482         { "memory_test (offline)" },
8483         { "loopback_test (offline)" },
8484         { "nvram_test (online)" },
8485         { "interrupt_test (online)" },
8486         { "link_test (online)" },
8487         { "idle check (online)" },
8488         { "MC errors (online)" }
8489 };
8490
8491 static int bnx2x_self_test_count(struct net_device *dev)
8492 {
8493         return BNX2X_NUM_TESTS;
8494 }
8495
8496 static int bnx2x_test_registers(struct bnx2x *bp)
8497 {
8498         int idx, i, rc = -ENODEV;
8499         u32 wr_val = 0;
8500         int port = BP_PORT(bp);
8501         static const struct {
8502                 u32  offset0;
8503                 u32  offset1;
8504                 u32  mask;
8505         } reg_tbl[] = {
8506 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8507                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8508                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8509                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8510                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8511                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8512                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8513                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8514                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8515                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8516 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8517                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8518                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8519                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8520                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8521                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8522                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8523                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8524                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8525                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8526 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8527                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8528                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8529                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8530                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8531                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8532                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8533                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8534                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8535                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8536 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8537                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8538                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8539                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8540                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8541                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8542                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8543                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8544
8545                 { 0xffffffff, 0, 0x00000000 }
8546         };
8547
8548         if (!netif_running(bp->dev))
8549                 return rc;
8550
8551         /* Run the test twice:
8552            First by writing 0x00000000, second by writing 0xffffffff */
8553         for (idx = 0; idx < 2; idx++) {
8554
8555                 switch (idx) {
8556                 case 0:
8557                         wr_val = 0;
8558                         break;
8559                 case 1:
8560                         wr_val = 0xffffffff;
8561                         break;
8562                 }
8563
8564                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8565                         u32 offset, mask, save_val, val;
8566
8567                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8568                         mask = reg_tbl[i].mask;
8569
8570                         save_val = REG_RD(bp, offset);
8571
8572                         REG_WR(bp, offset, wr_val);
8573                         val = REG_RD(bp, offset);
8574
8575                         /* Restore the original register's value */
8576                         REG_WR(bp, offset, save_val);
8577
8578                         /* verify the value is as expected */
8579                         if ((val & mask) != (wr_val & mask))
8580                                 goto test_reg_exit;
8581                 }
8582         }
8583
8584         rc = 0;
8585
8586 test_reg_exit:
8587         return rc;
8588 }
8589
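/* A worked instance of the mask compare above: entry 0,
 * BRB1_REG_PAUSE_LOW_THRESHOLD_0, has mask 0x000003ff, so a write of
 * 0xffffffff must read back with (val & 0x3ff) == 0x3ff and a write of 0
 * with (val & 0x3ff) == 0; bits outside the mask are don't-care.  The
 * offset1 stride (4 here) points the test at the port 1 copy of the
 * register when running on port 1.
 */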
8590 static int bnx2x_test_memory(struct bnx2x *bp)
8591 {
8592         int i, j, rc = -ENODEV;
8593         u32 val;
8594         static const struct {
8595                 u32 offset;
8596                 int size;
8597         } mem_tbl[] = {
8598                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8599                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8600                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8601                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8602                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8603                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8604                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8605
8606                 { 0xffffffff, 0 }
8607         };
8608         static const struct {
8609                 char *name;
8610                 u32 offset;
8611                 u32 e1_mask;
8612                 u32 e1h_mask;
8613         } prty_tbl[] = {
8614                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8615                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8616                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8617                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8618                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8619                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8620
8621                 { NULL, 0xffffffff, 0, 0 }
8622         };
8623
8624         if (!netif_running(bp->dev))
8625                 return rc;
8626
8627         /* Go through all the memories */
8628         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8629                 for (j = 0; j < mem_tbl[i].size; j++)
8630                         REG_RD(bp, mem_tbl[i].offset + j*4);
8631
8632         /* Check the parity status */
8633         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8634                 val = REG_RD(bp, prty_tbl[i].offset);
8635                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8636                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8637                         DP(NETIF_MSG_HW,
8638                            "%s is 0x%x\n", prty_tbl[i].name, val);
8639                         goto test_mem_exit;
8640                 }
8641         }
8642
8643         rc = 0;
8644
8645 test_mem_exit:
8646         return rc;
8647 }
8648
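/* The read pass above never compares data - touching every word of each
 * internal memory is enough to make the hardware recompute parity, so
 * latent corruption shows up afterwards in the *_PRTY_STS registers,
 * masked per chip family (e1_mask for 57710, e1h_mask for 57711).
 */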
8649 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8650 {
8651         int cnt = 1000;
8652
8653         if (link_up)
8654                 while (bnx2x_link_test(bp) && cnt--)
8655                         msleep(10);
8656 }
8657
8658 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8659 {
8660         unsigned int pkt_size, num_pkts, i;
8661         struct sk_buff *skb;
8662         unsigned char *packet;
8663         struct bnx2x_fastpath *fp = &bp->fp[0];
8664         u16 tx_start_idx, tx_idx;
8665         u16 rx_start_idx, rx_idx;
8666         u16 pkt_prod;
8667         struct sw_tx_bd *tx_buf;
8668         struct eth_tx_bd *tx_bd;
8669         dma_addr_t mapping;
8670         union eth_rx_cqe *cqe;
8671         u8 cqe_fp_flags;
8672         struct sw_rx_bd *rx_buf;
8673         u16 len;
8674         int rc = -ENODEV;
8675
8676         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8677                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8678                 bnx2x_acquire_phy_lock(bp);
8679                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8680                 bnx2x_release_phy_lock(bp);
8681
8682         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8683                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8684                 bnx2x_acquire_phy_lock(bp);
8685                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8686                 bnx2x_release_phy_lock(bp);
8687                 /* wait until link state is restored */
8688                 bnx2x_wait_for_link(bp, link_up);
8689
8690         } else
8691                 return -EINVAL;
8692
8693         pkt_size = 1514;
8694         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8695         if (!skb) {
8696                 rc = -ENOMEM;
8697                 goto test_loopback_exit;
8698         }
8699         packet = skb_put(skb, pkt_size);
8700         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8701         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8702         for (i = ETH_HLEN; i < pkt_size; i++)
8703                 packet[i] = (unsigned char) (i & 0xff);
8704
8705         num_pkts = 0;
8706         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8707         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8708
8709         pkt_prod = fp->tx_pkt_prod++;
8710         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8711         tx_buf->first_bd = fp->tx_bd_prod;
8712         tx_buf->skb = skb;
8713
8714         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8715         mapping = pci_map_single(bp->pdev, skb->data,
8716                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8717         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8718         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8719         tx_bd->nbd = cpu_to_le16(1);
8720         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8721         tx_bd->vlan = cpu_to_le16(pkt_prod);
8722         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8723                                        ETH_TX_BD_FLAGS_END_BD);
8724         tx_bd->general_data = ((UNICAST_ADDRESS <<
8725                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8726
8727         fp->hw_tx_prods->bds_prod =
8728                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8729         mb(); /* FW restriction: must not reorder writing nbd and packets */
8730         fp->hw_tx_prods->packets_prod =
8731                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8732         DOORBELL(bp, FP_IDX(fp), 0);
8733
8734         mmiowb();
8735
8736         num_pkts++;
8737         fp->tx_bd_prod++;
8738         bp->dev->trans_start = jiffies;
8739
8740         udelay(100);
8741
8742         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8743         if (tx_idx != tx_start_idx + num_pkts)
8744                 goto test_loopback_exit;
8745
8746         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8747         if (rx_idx != rx_start_idx + num_pkts)
8748                 goto test_loopback_exit;
8749
8750         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8751         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8752         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8753                 goto test_loopback_rx_exit;
8754
8755         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8756         if (len != pkt_size)
8757                 goto test_loopback_rx_exit;
8758
8759         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8760         skb = rx_buf->skb;
8761         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8762         for (i = ETH_HLEN; i < pkt_size; i++)
8763                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8764                         goto test_loopback_rx_exit;
8765
8766         rc = 0;
8767
8768 test_loopback_rx_exit:
8769
8770         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8771         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8772         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8773         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8774
8775         /* Update producers */
8776         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8777                              fp->rx_sge_prod);
8778         mmiowb(); /* keep prod updates ordered */
8779
8780 test_loopback_exit:
8781         bp->link_params.loopback_mode = LOOPBACK_NONE;
8782
8783         return rc;
8784 }
8785
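/* For the loopback run above to pass, a single self-addressed frame
 * (destination MAC = own address, payload byte i == i & 0xff) must
 * complete on the Tx side, show up as exactly one Rx completion with a
 * clean CQE, and come back with its length and payload pattern intact.
 */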
8786 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8787 {
8788         int rc = 0;
8789
8790         if (!netif_running(bp->dev))
8791                 return BNX2X_LOOPBACK_FAILED;
8792
8793         bnx2x_netif_stop(bp, 1);
8794
8795         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8796                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8797                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8798         }
8799
8800         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8801                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8802                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8803         }
8804
8805         bnx2x_netif_start(bp);
8806
8807         return rc;
8808 }
8809
8810 #define CRC32_RESIDUAL                  0xdebb20e3
8811
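/* Each NVRAM region checked below ends with its own CRC32, so running
 * ether_crc_le() over data-plus-CRC yields the constant residual
 * 0xdebb20e3 for any intact region - no per-region expected checksums
 * are needed.
 */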
8812 static int bnx2x_test_nvram(struct bnx2x *bp)
8813 {
8814         static const struct {
8815                 int offset;
8816                 int size;
8817         } nvram_tbl[] = {
8818                 {     0,  0x14 }, /* bootstrap */
8819                 {  0x14,  0xec }, /* dir */
8820                 { 0x100, 0x350 }, /* manuf_info */
8821                 { 0x450,  0xf0 }, /* feature_info */
8822                 { 0x640,  0x64 }, /* upgrade_key_info */
8823                 { 0x6a4,  0x64 },
8824                 { 0x708,  0x70 }, /* manuf_key_info */
8825                 { 0x778,  0x70 },
8826                 {     0,     0 }
8827         };
8828         u32 buf[0x350 / 4];
8829         u8 *data = (u8 *)buf;
8830         int i, rc;
8831         u32 magic, csum;
8832
8833         rc = bnx2x_nvram_read(bp, 0, data, 4);
8834         if (rc) {
8835                 DP(NETIF_MSG_PROBE, "magic read failed (rc -%d)\n", -rc);
8836                 goto test_nvram_exit;
8837         }
8838
8839         magic = be32_to_cpu(buf[0]);
8840         if (magic != 0x669955aa) {
8841                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8842                 rc = -ENODEV;
8843                 goto test_nvram_exit;
8844         }
8845
8846         for (i = 0; nvram_tbl[i].size; i++) {
8847
8848                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8849                                       nvram_tbl[i].size);
8850                 if (rc) {
8851                         DP(NETIF_MSG_PROBE,
8852                            "nvram_tbl[%d] read failed (rc -%d)\n", i, -rc);
8853                         goto test_nvram_exit;
8854                 }
8855
8856                 csum = ether_crc_le(nvram_tbl[i].size, data);
8857                 if (csum != CRC32_RESIDUAL) {
8858                         DP(NETIF_MSG_PROBE,
8859                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8860                         rc = -ENODEV;
8861                         goto test_nvram_exit;
8862                 }
8863         }
8864
8865 test_nvram_exit:
8866         return rc;
8867 }
8868
8869 static int bnx2x_test_intr(struct bnx2x *bp)
8870 {
8871         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8872         int i, rc;
8873
8874         if (!netif_running(bp->dev))
8875                 return -ENODEV;
8876
8877         config->hdr.length_6b = 0;
8878         config->hdr.offset = 0;
8879         config->hdr.client_id = BP_CL_ID(bp);
8880         config->hdr.reserved1 = 0;
8881
8882         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8883                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8884                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8885         if (rc == 0) {
8886                 bp->set_mac_pending++;
8887                 for (i = 0; i < 10; i++) {
8888                         if (!bp->set_mac_pending)
8889                                 break;
8890                         msleep_interruptible(10);
8891                 }
8892                 if (i == 10)
8893                         rc = -ENODEV;
8894         }
8895
8896         return rc;
8897 }
8898
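/* The interrupt test above exercises the slowpath: it posts an
 * effectively empty SET_MAC ramrod and waits up to ~100 ms (10 x 10 ms)
 * for the completion event to clear set_mac_pending, proving the
 * HW -> ISR -> slowpath event chain works without disturbing traffic.
 */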
8899 static void bnx2x_self_test(struct net_device *dev,
8900                             struct ethtool_test *etest, u64 *buf)
8901 {
8902         struct bnx2x *bp = netdev_priv(dev);
8903
8904         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8905
8906         if (!netif_running(dev))
8907                 return;
8908
8909         /* offline tests are not supported in MF mode */
8910         if (IS_E1HMF(bp))
8911                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8912
8913         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8914                 u8 link_up;
8915
8916                 link_up = bp->link_vars.link_up;
8917                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8918                 bnx2x_nic_load(bp, LOAD_DIAG);
8919                 /* wait until link state is restored */
8920                 bnx2x_wait_for_link(bp, link_up);
8921
8922                 if (bnx2x_test_registers(bp) != 0) {
8923                         buf[0] = 1;
8924                         etest->flags |= ETH_TEST_FL_FAILED;
8925                 }
8926                 if (bnx2x_test_memory(bp) != 0) {
8927                         buf[1] = 1;
8928                         etest->flags |= ETH_TEST_FL_FAILED;
8929                 }
8930                 buf[2] = bnx2x_test_loopback(bp, link_up);
8931                 if (buf[2] != 0)
8932                         etest->flags |= ETH_TEST_FL_FAILED;
8933
8934                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8935                 bnx2x_nic_load(bp, LOAD_NORMAL);
8936                 /* wait until link state is restored */
8937                 bnx2x_wait_for_link(bp, link_up);
8938         }
8939         if (bnx2x_test_nvram(bp) != 0) {
8940                 buf[3] = 1;
8941                 etest->flags |= ETH_TEST_FL_FAILED;
8942         }
8943         if (bnx2x_test_intr(bp) != 0) {
8944                 buf[4] = 1;
8945                 etest->flags |= ETH_TEST_FL_FAILED;
8946         }
8947         if (bp->port.pmf)
8948                 if (bnx2x_link_test(bp) != 0) {
8949                         buf[5] = 1;
8950                         etest->flags |= ETH_TEST_FL_FAILED;
8951                 }
8952         buf[7] = bnx2x_mc_assert(bp);
8953         if (buf[7] != 0)
8954                 etest->flags |= ETH_TEST_FL_FAILED;
8955
8956 #ifdef BNX2X_EXTRA_DEBUG
8957         bnx2x_panic_dump(bp);
8958 #endif
8959 }
8960
8961 static const struct {
8962         long offset;
8963         int size;
8964         u32 flags;
8965 #define STATS_FLAGS_PORT                1
8966 #define STATS_FLAGS_FUNC                2
8967         u8 string[ETH_GSTRING_LEN];
8968 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8969 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8970                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8971         { STATS_OFFSET32(error_bytes_received_hi),
8972                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8973         { STATS_OFFSET32(total_bytes_transmitted_hi),
8974                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8975         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8976                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8977         { STATS_OFFSET32(total_unicast_packets_received_hi),
8978                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8979         { STATS_OFFSET32(total_multicast_packets_received_hi),
8980                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8981         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8982                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8983         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8984                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8985         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8986                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8987 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8988                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8989         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8990                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8991         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8992                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
8993         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8994                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8995         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8996                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8997         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8998                                 8, STATS_FLAGS_PORT, "tx_deferred" },
8999         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9000                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9001         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9002                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9003         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9004                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9005         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9006                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9007 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9008                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9009         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9010                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9011         { STATS_OFFSET32(jabber_packets_received),
9012                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9013         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9014                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9015         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9016                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9017         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9018                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9019         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9020                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9021         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9022                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9023         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9024                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9025         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9026                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9027 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9028                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9029         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9030                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9031         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9032                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9033         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9034                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9035         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9036                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9037         { STATS_OFFSET32(mac_filter_discard),
9038                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9039         { STATS_OFFSET32(no_buff_discard),
9040                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9041         { STATS_OFFSET32(xxoverflow_discard),
9042                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9043         { STATS_OFFSET32(brb_drop_hi),
9044                                 8, STATS_FLAGS_PORT, "brb_discard" },
9045         { STATS_OFFSET32(brb_truncate_hi),
9046                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9047 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9048                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9049         { STATS_OFFSET32(rx_skb_alloc_failed),
9050                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9051 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9052                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9053 };
9054
9055 #define IS_NOT_E1HMF_STAT(bp, i) \
9056                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9057
9058 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9059 {
9060         struct bnx2x *bp = netdev_priv(dev);
9061         int i, j;
9062
9063         switch (stringset) {
9064         case ETH_SS_STATS:
9065                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9066                         if (IS_NOT_E1HMF_STAT(bp, i))
9067                                 continue;
9068                         strcpy(buf + j*ETH_GSTRING_LEN,
9069                                bnx2x_stats_arr[i].string);
9070                         j++;
9071                 }
9072                 break;
9073
9074         case ETH_SS_TEST:
9075                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9076                 break;
9077         }
9078 }
9079
9080 static int bnx2x_get_stats_count(struct net_device *dev)
9081 {
9082         struct bnx2x *bp = netdev_priv(dev);
9083         int i, num_stats = 0;
9084
9085         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9086                 if (IS_NOT_E1HMF_STAT(bp, i))
9087                         continue;
9088                 num_stats++;
9089         }
9090         return num_stats;
9091 }
9092
9093 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9094                                     struct ethtool_stats *stats, u64 *buf)
9095 {
9096         struct bnx2x *bp = netdev_priv(dev);
9097         u32 *hw_stats = (u32 *)&bp->eth_stats;
9098         int i, j;
9099
9100         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9101                 if (IS_NOT_E1HMF_STAT(bp, i))
9102                         continue;
9103
9104                 if (bnx2x_stats_arr[i].size == 0) {
9105                         /* skip this counter */
9106                         buf[j] = 0;
9107                         j++;
9108                         continue;
9109                 }
9110                 if (bnx2x_stats_arr[i].size == 4) {
9111                         /* 4-byte counter */
9112                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9113                         j++;
9114                         continue;
9115                 }
9116                 /* 8-byte counter */
9117                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9118                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9119                 j++;
9120         }
9121 }
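
/* Illustrative expansion of the 8-byte case above, with HILO_U64 as
 * defined in bnx2x.h: hi = 0x1, lo = 0x2 combine to
 *	(((u64)0x1) << 32) + 0x2 == 0x100000002ULL
 * matching the firmware layout of 64-bit counters as two 32-bit words,
 * high word first.
 */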
9122
9123 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9124 {
9125         struct bnx2x *bp = netdev_priv(dev);
9126         int port = BP_PORT(bp);
9127         int i;
9128
9129         if (!netif_running(dev))
9130                 return 0;
9131
9132         if (!bp->port.pmf)
9133                 return 0;
9134
9135         if (data == 0)
9136                 data = 2;
9137
9138         for (i = 0; i < (data * 2); i++) {
9139                 if ((i % 2) == 0)
9140                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9141                                       bp->link_params.hw_led_mode,
9142                                       bp->link_params.chip_id);
9143                 else
9144                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9145                                       bp->link_params.hw_led_mode,
9146                                       bp->link_params.chip_id);
9147
9148                 msleep_interruptible(500);
9149                 if (signal_pending(current))
9150                         break;
9151         }
9152
9153         if (bp->link_vars.link_up)
9154                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9155                               bp->link_vars.line_speed,
9156                               bp->link_params.hw_led_mode,
9157                               bp->link_params.chip_id);
9158
9159         return 0;
9160 }
9161
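/* Illustrative usage: ethtool -p eth0 3 lands in bnx2x_phys_id() above
 * with data == 3 and blinks the port LED for 3 seconds (data * 2
 * half-second intervals), then restores the LED to the current link
 * state; it is a no-op unless the device is up and this function is
 * the PMF.
 */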
9162 static struct ethtool_ops bnx2x_ethtool_ops = {
9163         .get_settings           = bnx2x_get_settings,
9164         .set_settings           = bnx2x_set_settings,
9165         .get_drvinfo            = bnx2x_get_drvinfo,
9166         .get_wol                = bnx2x_get_wol,
9167         .set_wol                = bnx2x_set_wol,
9168         .get_msglevel           = bnx2x_get_msglevel,
9169         .set_msglevel           = bnx2x_set_msglevel,
9170         .nway_reset             = bnx2x_nway_reset,
9171         .get_link               = ethtool_op_get_link,
9172         .get_eeprom_len         = bnx2x_get_eeprom_len,
9173         .get_eeprom             = bnx2x_get_eeprom,
9174         .set_eeprom             = bnx2x_set_eeprom,
9175         .get_coalesce           = bnx2x_get_coalesce,
9176         .set_coalesce           = bnx2x_set_coalesce,
9177         .get_ringparam          = bnx2x_get_ringparam,
9178         .set_ringparam          = bnx2x_set_ringparam,
9179         .get_pauseparam         = bnx2x_get_pauseparam,
9180         .set_pauseparam         = bnx2x_set_pauseparam,
9181         .get_rx_csum            = bnx2x_get_rx_csum,
9182         .set_rx_csum            = bnx2x_set_rx_csum,
9183         .get_tx_csum            = ethtool_op_get_tx_csum,
9184         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9185         .set_flags              = bnx2x_set_flags,
9186         .get_flags              = ethtool_op_get_flags,
9187         .get_sg                 = ethtool_op_get_sg,
9188         .set_sg                 = ethtool_op_set_sg,
9189         .get_tso                = ethtool_op_get_tso,
9190         .set_tso                = bnx2x_set_tso,
9191         .self_test_count        = bnx2x_self_test_count,
9192         .self_test              = bnx2x_self_test,
9193         .get_strings            = bnx2x_get_strings,
9194         .phys_id                = bnx2x_phys_id,
9195         .get_stats_count        = bnx2x_get_stats_count,
9196         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9197 };
9198
9199 /* end of ethtool_ops */
9200
9201 /****************************************************************************
9202 * General service functions
9203 ****************************************************************************/
9204
9205 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9206 {
9207         u16 pmcsr;
9208
9209         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9210
9211         switch (state) {
9212         case PCI_D0:
9213                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9214                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9215                                        PCI_PM_CTRL_PME_STATUS));
9216
9217                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9218                         /* delay required during transition out of D3hot */
9219                         msleep(20);
9220                 break;
9221
9222         case PCI_D3hot:
9223                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9224                 pmcsr |= 3;
9225
9226                 if (bp->wol)
9227                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9228
9229                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9230                                       pmcsr);
9231
9232                 /* No more memory access after this point until
9233                  * device is brought back to D0.
9234                  */
9235                 break;
9236
9237         default:
9238                 return -EINVAL;
9239         }
9240         return 0;
9241 }
9242
9243 /*
9244  * net_device service functions
9245  */
9246
9247 static int bnx2x_poll(struct napi_struct *napi, int budget)
9248 {
9249         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9250                                                  napi);
9251         struct bnx2x *bp = fp->bp;
9252         int work_done = 0;
9253         u16 rx_cons_sb;
9254
9255 #ifdef BNX2X_STOP_ON_ERROR
9256         if (unlikely(bp->panic))
9257                 goto poll_panic;
9258 #endif
9259
9260         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9261         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9262         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9263
9264         bnx2x_update_fpsb_idx(fp);
9265
9266         if (BNX2X_HAS_TX_WORK(fp))
9267                 bnx2x_tx_int(fp, budget);
9268
9269         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9270         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9271                 rx_cons_sb++;
9272         if (BNX2X_HAS_RX_WORK(fp))
9273                 work_done = bnx2x_rx_int(fp, budget);
9274
9275         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9276         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9277         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9278                 rx_cons_sb++;
9279
9280         /* must not complete if we consumed full budget */
9281         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9282
9283 #ifdef BNX2X_STOP_ON_ERROR
9284 poll_panic:
9285 #endif
9286                 netif_rx_complete(napi);
9287
9288                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9289                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9290                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9291                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9292         }
9293         return work_done;
9294 }
9295
9296
9297 /* We split the first BD into header and data BDs
9298  * to ease the pain of our fellow microcode engineers;
9299  * we use one mapping for both BDs.
9300  * So far this has only been observed to happen
9301  * in Other Operating Systems(TM).
9302  */
9303 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9304                                    struct bnx2x_fastpath *fp,
9305                                    struct eth_tx_bd **tx_bd, u16 hlen,
9306                                    u16 bd_prod, int nbd)
9307 {
9308         struct eth_tx_bd *h_tx_bd = *tx_bd;
9309         struct eth_tx_bd *d_tx_bd;
9310         dma_addr_t mapping;
9311         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9312
9313         /* first fix first BD */
9314         h_tx_bd->nbd = cpu_to_le16(nbd);
9315         h_tx_bd->nbytes = cpu_to_le16(hlen);
9316
9317         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9318            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9319            h_tx_bd->addr_lo, h_tx_bd->nbd);
9320
9321         /* now get a new data BD
9322          * (after the pbd) and fill it */
9323         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9324         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9325
9326         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9327                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9328
9329         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9330         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9331         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9332         d_tx_bd->vlan = 0;
9333         /* this marks the BD as one that has no individual mapping
9334          * the FW ignores this flag in a BD not marked start
9335          */
9336         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9337         DP(NETIF_MSG_TX_QUEUED,
9338            "TSO split data size is %d (%x:%x)\n",
9339            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9340
9341         /* update tx_bd for marking the last BD flag */
9342         *tx_bd = d_tx_bd;
9343
9344         return bd_prod;
9345 }
9346
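/* A numeric sketch of the split above: for a TSO frame with hlen 66
 * (14 ETH + 20 IP + 32 TCP with options) whose first BD mapped 200
 * bytes, the header BD is trimmed to nbytes 66 and a new data BD is
 * chained at mapping + 66 with nbytes 200 - 66 = 134 - one DMA mapping
 * shared by two BDs.
 */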
9347 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9348 {
9349         if (fix > 0)
9350                 csum = (u16) ~csum_fold(csum_sub(csum,
9351                                 csum_partial(t_header - fix, fix, 0)));
9352
9353         else if (fix < 0)
9354                 csum = (u16) ~csum_fold(csum_add(csum,
9355                                 csum_partial(t_header, -fix, 0)));
9356
9357         return swab16(csum);
9358 }
9359
9360 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9361 {
9362         u32 rc;
9363
9364         if (skb->ip_summed != CHECKSUM_PARTIAL)
9365                 rc = XMIT_PLAIN;
9366
9367         else {
9368                 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9369                         rc = XMIT_CSUM_V6;
9370                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9371                                 rc |= XMIT_CSUM_TCP;
9372
9373                 } else {
9374                         rc = XMIT_CSUM_V4;
9375                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9376                                 rc |= XMIT_CSUM_TCP;
9377                 }
9378         }
9379
9380         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9381                 rc |= XMIT_GSO_V4;
9382
9383         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9384                 rc |= XMIT_GSO_V6;
9385
9386         return rc;
9387 }
9388
9389 /* check if packet requires linearization (packet is too fragmented) */
9390 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9391                              u32 xmit_type)
9392 {
9393         int to_copy = 0;
9394         int hlen = 0;
9395         int first_bd_sz = 0;
9396
9397         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9398         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9399
9400                 if (xmit_type & XMIT_GSO) {
9401                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9402                         /* Check if LSO packet needs to be copied:
9403                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9404                         int wnd_size = MAX_FETCH_BD - 3;
9405                         /* Number of windows to check */
9406                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9407                         int wnd_idx = 0;
9408                         int frag_idx = 0;
9409                         u32 wnd_sum = 0;
9410
9411                         /* Headers length */
9412                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9413                                 tcp_hdrlen(skb);
9414
9415                         /* Amount of data (w/o headers) on linear part of SKB */
9416                         first_bd_sz = skb_headlen(skb) - hlen;
9417
9418                         wnd_sum  = first_bd_sz;
9419
9420                         /* Calculate the first sum - it's special */
9421                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9422                                 wnd_sum +=
9423                                         skb_shinfo(skb)->frags[frag_idx].size;
9424
9425                         /* If there was payload in the linear part, check the first window */
9426                         if (first_bd_sz > 0) {
9427                                 if (unlikely(wnd_sum < lso_mss)) {
9428                                         to_copy = 1;
9429                                         goto exit_lbl;
9430                                 }
9431
9432                                 wnd_sum -= first_bd_sz;
9433                         }
9434
9435                         /* The remaining windows are simpler: slide through
9436                            the frag list and check each one */
9437                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9438                                 wnd_sum +=
9439                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9440
9441                                 if (unlikely(wnd_sum < lso_mss)) {
9442                                         to_copy = 1;
9443                                         break;
9444                                 }
9445                                 wnd_sum -=
9446                                         skb_shinfo(skb)->frags[wnd_idx].size;
9447                         }
9448
9449                 } else {
9450                         /* a non-LSO packet that is too fragmented must
9451                            always be linearized */
9452                         to_copy = 1;
9453                 }
9454         }
9455
9456 exit_lbl:
9457         if (unlikely(to_copy))
9458                 DP(NETIF_MSG_TX_QUEUED,
9459                    "Linearization IS REQUIRED for %s packet. "
9460                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9461                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9462                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9463
9464         return to_copy;
9465 }
9466
9467 /* called with netif_tx_lock
9468  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9469  * netif_wake_queue()
9470  */
9471 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9472 {
9473         struct bnx2x *bp = netdev_priv(dev);
9474         struct bnx2x_fastpath *fp;
9475         struct sw_tx_bd *tx_buf;
9476         struct eth_tx_bd *tx_bd;
9477         struct eth_tx_parse_bd *pbd = NULL;
9478         u16 pkt_prod, bd_prod;
9479         int nbd, fp_index;
9480         dma_addr_t mapping;
9481         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9482         int vlan_off = (bp->e1hov ? 4 : 0);
9483         int i;
9484         u8 hlen = 0;
9485
9486 #ifdef BNX2X_STOP_ON_ERROR
9487         if (unlikely(bp->panic))
9488                 return NETDEV_TX_BUSY;
9489 #endif
9490
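        /* spread Tx packets over the fastpath queues by CPU id */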
9491         fp_index = (smp_processor_id() % bp->num_queues);
9492         fp = &bp->fp[fp_index];
9493
9494         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9495                 bp->eth_stats.driver_xoff++;
9496                 netif_stop_queue(dev);
9497                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9498                 return NETDEV_TX_BUSY;
9499         }
9500
9501         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9502            "  gso type %x  xmit_type %x\n",
9503            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9504            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9505
9506         /* First, check if we need to linearize the skb
9507            (due to FW restrictions) */
9508         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9509                 /* update the linearization statistics */
9510                 bp->lin_cnt++;
9511                 if (skb_linearize(skb) != 0) {
9512                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9513                            "silently dropping this SKB\n");
9514                         dev_kfree_skb_any(skb);
9515                         return NETDEV_TX_OK;
9516                 }
9517         }
9518
9519         /*
9520         Please read carefully. First we use one BD which we mark as start,
9521         then, for TSO or checksum offload, we have a parsing info BD,
9522         and only then we have the rest of the TSO BDs.
9523         (don't forget to mark the last one as last,
9524         and to unmap only AFTER you write to the BD ...)
9525         And above all, all pbd sizes are in 16-bit words - NOT DWORDS!
9526         */
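        /*
         * Sketch of the resulting BD chain for a TSO packet:
         *
         *   start BD (headers) -> parse BD -> data BD -> ... -> last BD
         *
         * For a packet with no offloads the parse BD is omitted and the
         * start BD may also be the last one.
         */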
9527
9528         pkt_prod = fp->tx_pkt_prod++;
9529         bd_prod = TX_BD(fp->tx_bd_prod);
9530
9531         /* get a tx_buf and first BD */
9532         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9533         tx_bd = &fp->tx_desc_ring[bd_prod];
9534
9535         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9536         tx_bd->general_data = (UNICAST_ADDRESS <<
9537                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9538         /* header nbd */
9539         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9540
9541         /* remember the first BD of the packet */
9542         tx_buf->first_bd = fp->tx_bd_prod;
9543         tx_buf->skb = skb;
9544
9545         DP(NETIF_MSG_TX_QUEUED,
9546            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9547            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9548
9549         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9550                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9551                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9552                 vlan_off += 4;
9553         } else
9554                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9555
9556         if (xmit_type) {
9557                 /* turn on parsing and get a BD */
9558                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9559                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9560
9561                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9562         }
9563
9564         if (xmit_type & XMIT_CSUM) {
9565                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9566
9567                 /* for now NS flag is not used in Linux */
9568                 pbd->global_data = (hlen |
9569                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9570                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9571
9572                 pbd->ip_hlen = (skb_transport_header(skb) -
9573                                 skb_network_header(skb)) / 2;
9574
9575                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9576
9577                 pbd->total_hlen = cpu_to_le16(hlen);
9578                 hlen = hlen*2 - vlan_off;
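                /*
                 * Worked example (hypothetical untagged IPv4/TCP frame,
                 * vlan_off == 0): a 14-byte MAC header gives hlen = 7 words,
                 * ip_hlen = 20/2 = 10 words, and tcp_hdrlen = 20 makes
                 * hlen = 7 + 10 + 10 = 27 words; total_hlen is then 27 words
                 * and the line above converts back to 27*2 - 0 = 54 header
                 * bytes.
                 */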
9579
9580                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9581
9582                 if (xmit_type & XMIT_CSUM_V4)
9583                         tx_bd->bd_flags.as_bitfield |=
9584                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9585                 else
9586                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9587
9588                 if (xmit_type & XMIT_CSUM_TCP) {
9589                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9590
9591                 } else {
9592                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9593
9594                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9595                         pbd->cs_offset = fix / 2;
9596
9597                         DP(NETIF_MSG_TX_QUEUED,
9598                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9599                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9600                            SKB_CS(skb));
9601
9602                         /* HW bug: fixup the CSUM */
9603                         pbd->tcp_pseudo_csum =
9604                                 bnx2x_csum_fix(skb_transport_header(skb),
9605                                                SKB_CS(skb), fix);
9606
9607                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9608                            pbd->tcp_pseudo_csum);
9609                 }
9610         }
9611
9612         mapping = pci_map_single(bp->pdev, skb->data,
9613                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9614
9615         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9616         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
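        /* BD count: one BD per frag plus the start BD, plus the parse BD
         * when one was allocated
         */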
9617         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9618         tx_bd->nbd = cpu_to_le16(nbd);
9619         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9620
9621         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9622            "  nbytes %d  flags %x  vlan %x\n",
9623            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9624            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9625            le16_to_cpu(tx_bd->vlan));
9626
9627         if (xmit_type & XMIT_GSO) {
9628
9629                 DP(NETIF_MSG_TX_QUEUED,
9630                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9631                    skb->len, hlen, skb_headlen(skb),
9632                    skb_shinfo(skb)->gso_size);
9633
9634                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9635
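                /* if the linear part carries payload beyond the headers,
                 * split the first BD so the headers get a BD of their own
                 */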
9636                 if (unlikely(skb_headlen(skb) > hlen))
9637                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9638                                                  bd_prod, ++nbd);
9639
9640                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9641                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9642                 pbd->tcp_flags = pbd_tcp_flags(skb);
9643
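                /* seed the FW with a pseudo-header checksum computed over a
                 * zero length; the PSEUDO_CS_WITHOUT_LEN flag set below
                 * tells it the length is excluded
                 */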
9644                 if (xmit_type & XMIT_GSO_V4) {
9645                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9646                         pbd->tcp_pseudo_csum =
9647                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9648                                                           ip_hdr(skb)->daddr,
9649                                                           0, IPPROTO_TCP, 0));
9650
9651                 } else
9652                         pbd->tcp_pseudo_csum =
9653                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9654                                                         &ipv6_hdr(skb)->daddr,
9655                                                         0, IPPROTO_TCP, 0));
9656
9657                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9658         }
9659
9660         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9661                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9662
9663                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9664                 tx_bd = &fp->tx_desc_ring[bd_prod];
9665
9666                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9667                                        frag->size, PCI_DMA_TODEVICE);
9668
9669                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9670                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9671                 tx_bd->nbytes = cpu_to_le16(frag->size);
9672                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9673                 tx_bd->bd_flags.as_bitfield = 0;
9674
9675                 DP(NETIF_MSG_TX_QUEUED,
9676                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9677                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9678                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9679         }
9680
9681         /* finally, mark this BD as the last BD of the packet */
9682         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9683
9684         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9685            tx_bd, tx_bd->bd_flags.as_bitfield);
9686
9687         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9688
9689         /* now ring the Tx doorbell; if the packet wrapped past a page
9690          * boundary it also consumed the next-page BD, so count it in nbd
9691          */
9692         if (TX_BD_POFF(bd_prod) < nbd)
9693                 nbd++;
9694
9695         if (pbd)
9696                 DP(NETIF_MSG_TX_QUEUED,
9697                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9698                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9699                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9700                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9701                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9702
9703         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9704
9705         fp->hw_tx_prods->bds_prod =
9706                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9707         mb(); /* FW restriction: must not reorder writing nbd and packets */
9708         fp->hw_tx_prods->packets_prod =
9709                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9710         DOORBELL(bp, FP_IDX(fp), 0);
9711
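        /* order the doorbell MMIO write with respect to writes from other
         * CPUs before any protecting lock is released
         */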
9712         mmiowb();
9713
9714         fp->tx_bd_prod += nbd;
9715         dev->trans_start = jiffies;
9716
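        /* stop the queue when a worst-case packet no longer fits, then
         * re-check: bnx2x_tx_int() may have freed BDs in the meantime, and
         * waking here avoids a missed wakeup
         */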
9717         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9718                 netif_stop_queue(dev);
9719                 bp->eth_stats.driver_xoff++;
9720                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9721                         netif_wake_queue(dev);
9722         }
9723         fp->tx_pkt++;
9724
9725         return NETDEV_TX_OK;
9726 }
9727
9728 /* called with rtnl_lock */
9729 static int bnx2x_open(struct net_device *dev)
9730 {
9731         struct bnx2x *bp = netdev_priv(dev);
9732
9733         bnx2x_set_power_state(bp, PCI_D0);
9734
9735         return bnx2x_nic_load(bp, LOAD_OPEN);
9736 }
9737
9738 /* called with rtnl_lock */
9739 static int bnx2x_close(struct net_device *dev)
9740 {
9741         struct bnx2x *bp = netdev_priv(dev);
9742
9743         /* Unload the driver, release IRQs */
9744         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9745         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9746                 if (!CHIP_REV_IS_SLOW(bp))
9747                         bnx2x_set_power_state(bp, PCI_D3hot);
9748
9749         return 0;
9750 }
9751
9752 /* called with netif_tx_lock from set_multicast */
9753 static void bnx2x_set_rx_mode(struct net_device *dev)
9754 {
9755         struct bnx2x *bp = netdev_priv(dev);
9756         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9757         int port = BP_PORT(bp);
9758
9759         if (bp->state != BNX2X_STATE_OPEN) {
9760                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9761                 return;
9762         }
9763
9764         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9765
9766         if (dev->flags & IFF_PROMISC)
9767                 rx_mode = BNX2X_RX_MODE_PROMISC;
9768
9769         else if ((dev->flags & IFF_ALLMULTI) ||
9770                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9771                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9772
9773         else { /* some multicasts */
9774                 if (CHIP_IS_E1(bp)) {
9775                         int i, old, offset;
9776                         struct dev_mc_list *mclist;
9777                         struct mac_configuration_cmd *config =
9778                                                 bnx2x_sp(bp, mcast_config);
9779
9780                         for (i = 0, mclist = dev->mc_list;
9781                              mclist && (i < dev->mc_count);
9782                              i++, mclist = mclist->next) {
9783
9784                                 config->config_table[i].
9785                                         cam_entry.msb_mac_addr =
9786                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9787                                 config->config_table[i].
9788                                         cam_entry.middle_mac_addr =
9789                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9790                                 config->config_table[i].
9791                                         cam_entry.lsb_mac_addr =
9792                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9793                                 config->config_table[i].cam_entry.flags =
9794                                                         cpu_to_le16(port);
9795                                 config->config_table[i].
9796                                         target_table_entry.flags = 0;
9797                                 config->config_table[i].
9798                                         target_table_entry.client_id = 0;
9799                                 config->config_table[i].
9800                                         target_table_entry.vlan_id = 0;
9801
9802                                 DP(NETIF_MSG_IFUP,
9803                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9804                                    config->config_table[i].
9805                                                 cam_entry.msb_mac_addr,
9806                                    config->config_table[i].
9807                                                 cam_entry.middle_mac_addr,
9808                                    config->config_table[i].
9809                                                 cam_entry.lsb_mac_addr);
9810                         }
9811                         old = config->hdr.length_6b;
9812                         if (old > i) {
9813                                 for (; i < old; i++) {
9814                                         if (CAM_IS_INVALID(config->
9815                                                            config_table[i])) {
9816                                                 i--; /* already invalidated */
9817                                                 break;
9818                                         }
9819                                         /* invalidate */
9820                                         CAM_INVALIDATE(config->
9821                                                        config_table[i]);
9822                                 }
9823                         }
9824
9825                         if (CHIP_REV_IS_SLOW(bp))
9826                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9827                         else
9828                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9829
9830                         config->hdr.length_6b = i;
9831                         config->hdr.offset = offset;
9832                         config->hdr.client_id = BP_CL_ID(bp);
9833                         config->hdr.reserved1 = 0;
9834
9835                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9836                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9837                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9838                                       0);
9839                 } else { /* E1H */
9840                         /* Accept one or more multicasts */
9841                         struct dev_mc_list *mclist;
9842                         u32 mc_filter[MC_HASH_SIZE];
9843                         u32 crc, bit, regidx;
9844                         int i;
9845
9846                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9847
9848                         for (i = 0, mclist = dev->mc_list;
9849                              mclist && (i < dev->mc_count);
9850                              i++, mclist = mclist->next) {
9851
9852                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9853                                    mclist->dmi_addr);
9854
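                                /* hash the MAC with CRC32c; the top byte
                                 * selects one bit in the 256-bit MC_HASH
                                 * register array
                                 */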
9855                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9856                                 bit = (crc >> 24) & 0xff;
9857                                 regidx = bit >> 5;
9858                                 bit &= 0x1f;
9859                                 mc_filter[regidx] |= (1 << bit);
9860                         }
9861
9862                         for (i = 0; i < MC_HASH_SIZE; i++)
9863                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9864                                        mc_filter[i]);
9865                 }
9866         }
9867
9868         bp->rx_mode = rx_mode;
9869         bnx2x_set_storm_rx_mode(bp);
9870 }
9871
9872 /* called with rtnl_lock */
9873 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9874 {
9875         struct sockaddr *addr = p;
9876         struct bnx2x *bp = netdev_priv(dev);
9877
9878         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9879                 return -EINVAL;
9880
9881         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9882         if (netif_running(dev)) {
9883                 if (CHIP_IS_E1(bp))
9884                         bnx2x_set_mac_addr_e1(bp, 1);
9885                 else
9886                         bnx2x_set_mac_addr_e1h(bp, 1);
9887         }
9888
9889         return 0;
9890 }
9891
9892 /* called with rtnl_lock */
9893 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9894 {
9895         struct mii_ioctl_data *data = if_mii(ifr);
9896         struct bnx2x *bp = netdev_priv(dev);
9897         int port = BP_PORT(bp);
9898         int err;
9899
9900         switch (cmd) {
9901         case SIOCGMIIPHY:
9902                 data->phy_id = bp->port.phy_addr;
9903
9904                 /* fallthrough */
9905
9906         case SIOCGMIIREG: {
9907                 u16 mii_regval;
9908
9909                 if (!netif_running(dev))
9910                         return -EAGAIN;
9911
9912                 mutex_lock(&bp->port.phy_mutex);
9913                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9914                                       DEFAULT_PHY_DEV_ADDR,
9915                                       (data->reg_num & 0x1f), &mii_regval);
9916                 data->val_out = mii_regval;
9917                 mutex_unlock(&bp->port.phy_mutex);
9918                 return err;
9919         }
9920
9921         case SIOCSMIIREG:
9922                 if (!capable(CAP_NET_ADMIN))
9923                         return -EPERM;
9924
9925                 if (!netif_running(dev))
9926                         return -EAGAIN;
9927
9928                 mutex_lock(&bp->port.phy_mutex);
9929                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9930                                        DEFAULT_PHY_DEV_ADDR,
9931                                        (data->reg_num & 0x1f), data->val_in);
9932                 mutex_unlock(&bp->port.phy_mutex);
9933                 return err;
9934
9935         default:
9936                 /* do nothing */
9937                 break;
9938         }
9939
9940         return -EOPNOTSUPP;
9941 }
9942
9943 /* called with rtnl_lock */
9944 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9945 {
9946         struct bnx2x *bp = netdev_priv(dev);
9947         int rc = 0;
9948
9949         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9950             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9951                 return -EINVAL;
9952
9953         /* This does not race with packet allocation
9954          * because the actual alloc size is
9955          * only updated as part of load
9956          */
9957         dev->mtu = new_mtu;
9958
9959         if (netif_running(dev)) {
9960                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9961                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9962         }
9963
9964         return rc;
9965 }
9966
9967 static void bnx2x_tx_timeout(struct net_device *dev)
9968 {
9969         struct bnx2x *bp = netdev_priv(dev);
9970
9971 #ifdef BNX2X_STOP_ON_ERROR
9972         if (!bp->panic)
9973                 bnx2x_panic();
9974 #endif
9975         /* This allows the netif to be shut down gracefully before resetting */
9976         schedule_work(&bp->reset_task);
9977 }
9978
9979 #ifdef BCM_VLAN
9980 /* called with rtnl_lock */
9981 static void bnx2x_vlan_rx_register(struct net_device *dev,
9982                                    struct vlan_group *vlgrp)
9983 {
9984         struct bnx2x *bp = netdev_priv(dev);
9985
9986         bp->vlgrp = vlgrp;
9987         if (netif_running(dev))
9988                 bnx2x_set_client_config(bp);
9989 }
9990
9991 #endif
9992
9993 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9994 static void poll_bnx2x(struct net_device *dev)
9995 {
9996         struct bnx2x *bp = netdev_priv(dev);
9997
9998         disable_irq(bp->pdev->irq);
9999         bnx2x_interrupt(bp->pdev->irq, dev);
10000         enable_irq(bp->pdev->irq);
10001 }
10002 #endif
10003
10004 static const struct net_device_ops bnx2x_netdev_ops = {
10005         .ndo_open               = bnx2x_open,
10006         .ndo_stop               = bnx2x_close,
10007         .ndo_start_xmit         = bnx2x_start_xmit,
10008         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10009         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10010         .ndo_validate_addr      = eth_validate_addr,
10011         .ndo_do_ioctl           = bnx2x_ioctl,
10012         .ndo_change_mtu         = bnx2x_change_mtu,
10013         .ndo_tx_timeout         = bnx2x_tx_timeout,
10014 #ifdef BCM_VLAN
10015         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10016 #endif
10017 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10018         .ndo_poll_controller    = poll_bnx2x,
10019 #endif
10020 };
10021
10022
10023 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10024                                     struct net_device *dev)
10025 {
10026         struct bnx2x *bp;
10027         int rc;
10028
10029         SET_NETDEV_DEV(dev, &pdev->dev);
10030         bp = netdev_priv(dev);
10031
10032         bp->dev = dev;
10033         bp->pdev = pdev;
10034         bp->flags = 0;
10035         bp->func = PCI_FUNC(pdev->devfn);
10036
10037         rc = pci_enable_device(pdev);
10038         if (rc) {
10039                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10040                 goto err_out;
10041         }
10042
10043         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10044                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10045                        " aborting\n");
10046                 rc = -ENODEV;
10047                 goto err_out_disable;
10048         }
10049
10050         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10051                 printk(KERN_ERR PFX "Cannot find second PCI device"
10052                        " base address, aborting\n");
10053                 rc = -ENODEV;
10054                 goto err_out_disable;
10055         }
10056
10057         if (atomic_read(&pdev->enable_cnt) == 1) {
10058                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10059                 if (rc) {
10060                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10061                                " aborting\n");
10062                         goto err_out_disable;
10063                 }
10064
10065                 pci_set_master(pdev);
10066                 pci_save_state(pdev);
10067         }
10068
10069         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10070         if (bp->pm_cap == 0) {
10071                 printk(KERN_ERR PFX "Cannot find power management"
10072                        " capability, aborting\n");
10073                 rc = -EIO;
10074                 goto err_out_release;
10075         }
10076
10077         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10078         if (bp->pcie_cap == 0) {
10079                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10080                        " aborting\n");
10081                 rc = -EIO;
10082                 goto err_out_release;
10083         }
10084
10085         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10086                 bp->flags |= USING_DAC_FLAG;
10087                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10088                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10089                                " failed, aborting\n");
10090                         rc = -EIO;
10091                         goto err_out_release;
10092                 }
10093
10094         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10095                 printk(KERN_ERR PFX "System does not support DMA,"
10096                        " aborting\n");
10097                 rc = -EIO;
10098                 goto err_out_release;
10099         }
10100
10101         dev->mem_start = pci_resource_start(pdev, 0);
10102         dev->base_addr = dev->mem_start;
10103         dev->mem_end = pci_resource_end(pdev, 0);
10104
10105         dev->irq = pdev->irq;
10106
10107         bp->regview = pci_ioremap_bar(pdev, 0);
10108         if (!bp->regview) {
10109                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10110                 rc = -ENOMEM;
10111                 goto err_out_release;
10112         }
10113
10114         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10115                                         min_t(u64, BNX2X_DB_SIZE,
10116                                               pci_resource_len(pdev, 2)));
10117         if (!bp->doorbells) {
10118                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10119                 rc = -ENOMEM;
10120                 goto err_out_unmap;
10121         }
10122
10123         bnx2x_set_power_state(bp, PCI_D0);
10124
10125         /* clean indirect addresses */
10126         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10127                                PCICFG_VENDOR_ID_OFFSET);
10128         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10129         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10130         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10131         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10132
10133         dev->watchdog_timeo = TX_TIMEOUT;
10134
10135         dev->netdev_ops = &bnx2x_netdev_ops;
10136         dev->ethtool_ops = &bnx2x_ethtool_ops;
10137         dev->features |= NETIF_F_SG;
10138         dev->features |= NETIF_F_HW_CSUM;
10139         if (bp->flags & USING_DAC_FLAG)
10140                 dev->features |= NETIF_F_HIGHDMA;
10141 #ifdef BCM_VLAN
10142         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10143 #endif
10144         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10145         dev->features |= NETIF_F_TSO6;
10146
10147         return 0;
10148
10149 err_out_unmap:
10150         if (bp->regview) {
10151                 iounmap(bp->regview);
10152                 bp->regview = NULL;
10153         }
10154         if (bp->doorbells) {
10155                 iounmap(bp->doorbells);
10156                 bp->doorbells = NULL;
10157         }
10158
10159 err_out_release:
10160         if (atomic_read(&pdev->enable_cnt) == 1)
10161                 pci_release_regions(pdev);
10162
10163 err_out_disable:
10164         pci_disable_device(pdev);
10165         pci_set_drvdata(pdev, NULL);
10166
10167 err_out:
10168         return rc;
10169 }
10170
10171 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10172 {
10173         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10174
10175         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10176         return val;
10177 }
10178
10179 /* return value of 1=2.5GHz 2=5GHz */
10180 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10181 {
10182         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10183
10184         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10185         return val;
10186 }
10187
10188 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10189                                     const struct pci_device_id *ent)
10190 {
10191         static int version_printed;
10192         struct net_device *dev = NULL;
10193         struct bnx2x *bp;
10194         int rc;
10195
10196         if (version_printed++ == 0)
10197                 printk(KERN_INFO "%s", version);
10198
10199         /* dev zeroed in alloc_etherdev */
10200         dev = alloc_etherdev(sizeof(*bp));
10201         if (!dev) {
10202                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10203                 return -ENOMEM;
10204         }
10205
10206         bp = netdev_priv(dev);
10207         bp->msglevel = debug;
10208
10209         rc = bnx2x_init_dev(pdev, dev);
10210         if (rc < 0) {
10211                 free_netdev(dev);
10212                 return rc;
10213         }
10214
10215         rc = register_netdev(dev);
10216         if (rc) {
10217                 dev_err(&pdev->dev, "Cannot register net device\n");
10218                 goto init_one_exit;
10219         }
10220
10221         pci_set_drvdata(pdev, dev);
10222
10223         rc = bnx2x_init_bp(bp);
10224         if (rc) {
10225                 unregister_netdev(dev);
10226                 goto init_one_exit;
10227         }
10228
10229         netif_carrier_off(dev);
10230
10231         bp->common.name = board_info[ent->driver_data].name;
10232         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10233                " IRQ %d, ", dev->name, bp->common.name,
10234                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10235                bnx2x_get_pcie_width(bp),
10236                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10237                dev->base_addr, bp->pdev->irq);
10238         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10239         return 0;
10240
10241 init_one_exit:
10242         if (bp->regview)
10243                 iounmap(bp->regview);
10244
10245         if (bp->doorbells)
10246                 iounmap(bp->doorbells);
10247
10248         free_netdev(dev);
10249
10250         if (atomic_read(&pdev->enable_cnt) == 1)
10251                 pci_release_regions(pdev);
10252
10253         pci_disable_device(pdev);
10254         pci_set_drvdata(pdev, NULL);
10255
10256         return rc;
10257 }
10258
10259 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10260 {
10261         struct net_device *dev = pci_get_drvdata(pdev);
10262         struct bnx2x *bp;
10263
10264         if (!dev) {
10265                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10266                 return;
10267         }
10268         bp = netdev_priv(dev);
10269
10270         unregister_netdev(dev);
10271
10272         if (bp->regview)
10273                 iounmap(bp->regview);
10274
10275         if (bp->doorbells)
10276                 iounmap(bp->doorbells);
10277
10278         free_netdev(dev);
10279
10280         if (atomic_read(&pdev->enable_cnt) == 1)
10281                 pci_release_regions(pdev);
10282
10283         pci_disable_device(pdev);
10284         pci_set_drvdata(pdev, NULL);
10285 }
10286
10287 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10288 {
10289         struct net_device *dev = pci_get_drvdata(pdev);
10290         struct bnx2x *bp;
10291
10292         if (!dev) {
10293                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10294                 return -ENODEV;
10295         }
10296         bp = netdev_priv(dev);
10297
10298         rtnl_lock();
10299
10300         pci_save_state(pdev);
10301
10302         if (!netif_running(dev)) {
10303                 rtnl_unlock();
10304                 return 0;
10305         }
10306
10307         netif_device_detach(dev);
10308
10309         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10310
10311         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10312
10313         rtnl_unlock();
10314
10315         return 0;
10316 }
10317
10318 static int bnx2x_resume(struct pci_dev *pdev)
10319 {
10320         struct net_device *dev = pci_get_drvdata(pdev);
10321         struct bnx2x *bp;
10322         int rc;
10323
10324         if (!dev) {
10325                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10326                 return -ENODEV;
10327         }
10328         bp = netdev_priv(dev);
10329
10330         rtnl_lock();
10331
10332         pci_restore_state(pdev);
10333
10334         if (!netif_running(dev)) {
10335                 rtnl_unlock();
10336                 return 0;
10337         }
10338
10339         bnx2x_set_power_state(bp, PCI_D0);
10340         netif_device_attach(dev);
10341
10342         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10343
10344         rtnl_unlock();
10345
10346         return rc;
10347 }
10348
10349 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10350 {
10351         int i;
10352
10353         bp->state = BNX2X_STATE_ERROR;
10354
10355         bp->rx_mode = BNX2X_RX_MODE_NONE;
10356
10357         bnx2x_netif_stop(bp, 0);
10358
10359         del_timer_sync(&bp->timer);
10360         bp->stats_state = STATS_STATE_DISABLED;
10361         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10362
10363         /* Release IRQs */
10364         bnx2x_free_irq(bp);
10365
10366         if (CHIP_IS_E1(bp)) {
10367                 struct mac_configuration_cmd *config =
10368                                                 bnx2x_sp(bp, mcast_config);
10369
10370                 for (i = 0; i < config->hdr.length_6b; i++)
10371                         CAM_INVALIDATE(config->config_table[i]);
10372         }
10373
10374         /* Free SKBs, SGEs, TPA pool and driver internals */
10375         bnx2x_free_skbs(bp);
10376         for_each_queue(bp, i)
10377                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10378         bnx2x_free_mem(bp);
10379
10380         bp->state = BNX2X_STATE_CLOSED;
10381
10382         netif_carrier_off(bp->dev);
10383
10384         return 0;
10385 }
10386
10387 static void bnx2x_eeh_recover(struct bnx2x *bp)
10388 {
10389         u32 val;
10390
10391         mutex_init(&bp->port.phy_mutex);
10392
10393         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10394         bp->link_params.shmem_base = bp->common.shmem_base;
10395         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10396
10397         if (!bp->common.shmem_base ||
10398             (bp->common.shmem_base < 0xA0000) ||
10399             (bp->common.shmem_base >= 0xC0000)) {
10400                 BNX2X_DEV_INFO("MCP not active\n");
10401                 bp->flags |= NO_MCP_FLAG;
10402                 return;
10403         }
10404
10405         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10406         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10407                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10408                 BNX2X_ERR("BAD MCP validity signature\n");
10409
10410         if (!BP_NOMCP(bp)) {
10411                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10412                               & DRV_MSG_SEQ_NUMBER_MASK);
10413                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10414         }
10415 }
10416
10417 /**
10418  * bnx2x_io_error_detected - called when a PCI error is detected
10419  * @pdev: Pointer to PCI device
10420  * @state: The current pci connection state
10421  *
10422  * This function is called after a PCI bus error affecting
10423  * this device has been detected.
10424  */
10425 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10426                                                 pci_channel_state_t state)
10427 {
10428         struct net_device *dev = pci_get_drvdata(pdev);
10429         struct bnx2x *bp = netdev_priv(dev);
10430
10431         rtnl_lock();
10432
10433         netif_device_detach(dev);
10434
10435         if (netif_running(dev))
10436                 bnx2x_eeh_nic_unload(bp);
10437
10438         pci_disable_device(pdev);
10439
10440         rtnl_unlock();
10441
10442         /* Request a slot reset */
10443         return PCI_ERS_RESULT_NEED_RESET;
10444 }
10445
10446 /**
10447  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10448  * @pdev: Pointer to PCI device
10449  *
10450  * Restart the card from scratch, as if from a cold boot.
10451  */
10452 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10453 {
10454         struct net_device *dev = pci_get_drvdata(pdev);
10455         struct bnx2x *bp = netdev_priv(dev);
10456
10457         rtnl_lock();
10458
10459         if (pci_enable_device(pdev)) {
10460                 dev_err(&pdev->dev,
10461                         "Cannot re-enable PCI device after reset\n");
10462                 rtnl_unlock();
10463                 return PCI_ERS_RESULT_DISCONNECT;
10464         }
10465
10466         pci_set_master(pdev);
10467         pci_restore_state(pdev);
10468
10469         if (netif_running(dev))
10470                 bnx2x_set_power_state(bp, PCI_D0);
10471
10472         rtnl_unlock();
10473
10474         return PCI_ERS_RESULT_RECOVERED;
10475 }
10476
10477 /**
10478  * bnx2x_io_resume - called when traffic can start flowing again
10479  * @pdev: Pointer to PCI device
10480  *
10481  * This callback is called when the error recovery driver tells us that
10482  * it is OK to resume normal operation.
10483  */
10484 static void bnx2x_io_resume(struct pci_dev *pdev)
10485 {
10486         struct net_device *dev = pci_get_drvdata(pdev);
10487         struct bnx2x *bp = netdev_priv(dev);
10488
10489         rtnl_lock();
10490
10491         bnx2x_eeh_recover(bp);
10492
10493         if (netif_running(dev))
10494                 bnx2x_nic_load(bp, LOAD_NORMAL);
10495
10496         netif_device_attach(dev);
10497
10498         rtnl_unlock();
10499 }
10500
10501 static struct pci_error_handlers bnx2x_err_handler = {
10502         .error_detected = bnx2x_io_error_detected,
10503         .slot_reset = bnx2x_io_slot_reset,
10504         .resume = bnx2x_io_resume,
10505 };
10506
10507 static struct pci_driver bnx2x_pci_driver = {
10508         .name        = DRV_MODULE_NAME,
10509         .id_table    = bnx2x_pci_tbl,
10510         .probe       = bnx2x_init_one,
10511         .remove      = __devexit_p(bnx2x_remove_one),
10512         .suspend     = bnx2x_suspend,
10513         .resume      = bnx2x_resume,
10514         .err_handler = &bnx2x_err_handler,
10515 };
10516
10517 static int __init bnx2x_init(void)
10518 {
10519         return pci_register_driver(&bnx2x_pci_driver);
10520 }
10521
10522 static void __exit bnx2x_cleanup(void)
10523 {
10524         pci_unregister_driver(&bnx2x_pci_driver);
10525 }
10526
10527 module_init(bnx2x_init);
10528 module_exit(bnx2x_cleanup);
10529