/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.26"
#define DRV_MODULE_RELDATE      "2009/01/26"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

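/* Indirect register access: the GRC register space is reached through a
 * window in PCI config space - the target address goes into
 * PCICFG_GRC_ADDRESS and data moves through PCICFG_GRC_DATA.  The window
 * is parked back at PCICFG_VENDOR_ID_OFFSET afterwards (presumably so
 * that stray config cycles land on a harmless offset).
 */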
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

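/* One "go" doorbell per DMAE command channel; writing 1 to
 * dmae_reg_go_c[idx] launches the command image previously copied into
 * DMAE command memory slot idx (see bnx2x_post_dmae() below).
 */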
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
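        /* GRC destination addresses are in dword units, hence the >> 2;
         * the PCI source side takes the full 64-bit byte address. */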
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

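        /* The DMAE engine signals completion by writing DMAE_COMP_VAL to
         * the completion address programmed above; poll the write-back
         * word until it shows up or the retry budget runs out. */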
        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

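/* Mirror image of bnx2x_write_dmae(): GRC -> PCI this time, with the
 * result landing in the slowpath wb_data[] scratch area. */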
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

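/* Dump the assert lists of the four storm processors (XSTORM, TSTORM,
 * CSTORM, USTORM).  Each list entry is four dwords; scanning stops at
 * the first entry whose opcode reads back as "invalid", and the return
 * value is the number of asserts found. */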
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

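/* Print the firmware (MCP) trace buffer.  The buffer lives in the MCP
 * scratchpad and is circular: "mark" points at the split, so the text
 * is emitted in two passes - mark..end, then start..mark.  The htonl()
 * puts each dword's ASCII payload back into byte order. */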
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

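/* Best-effort post-mortem: freeze statistics, then print a window of
 * every fastpath ring around its current producer/consumer indices,
 * followed by the FW trace and the storm assert lists. */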
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

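/* Program the HC (host coalescing) block for the interrupt mode in use.
 * Note the INTx fallback path writes the config twice: first with the
 * MSI/MSI-X enable bit still set, then with it cleared - the
 * intermediate write is apparently required by the HC when reverting to
 * the legacy interrupt line. */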
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

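/* Acknowledge a status block: a single 32-bit write to the HC command
 * register encodes the SB id, the storm, the new index value and
 * whether the interrupt should be re-enabled (op/update). */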
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

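/* Read the SIMD mask from the HC: the returned bits say which status
 * block indices changed, and (as the function name suggests) the read
 * itself appears to double as the interrupt acknowledge for the
 * INTx/MSI path. */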
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);

}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}


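/* Handle a slowpath (ramrod) completion delivered on the RCQ.  The
 * (command | state) switch pattern below accepts a completion only in
 * the state that is actually waiting for it. */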
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

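/* The SGE mask has one bit per SGE ring entry: a set bit means the
 * entry still holds a buffer, a cleared bit means the FW has consumed
 * it.  The last two entries of each ring page are "next page" pointers
 * that never carry buffers, so their bits are kept cleared and never
 * hold back producer recycling. */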
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the bits for the two last indices in each page:
           these correspond to the "next page" element, hence will
           never be indicated by the FW and must be excluded from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

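/* TPA (LRO) start: the CQE opens an aggregation "bin".  The spare skb
 * parked in tpa_pool[queue] takes over the producer slot on the BD
 * ring, while the skb that just received the first segment moves into
 * the pool and stays mapped until the matching TPA stop arrives. */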
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take the VLAN tag into account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since the FW might read the BD/SGE right after the
         * producer is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since the FW
         * assumes BDs must have buffers.
         */
1405         wmb();
1406
1407         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1408                 REG_WR(bp, BAR_USTRORM_INTMEM +
1409                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1410                        ((u32 *)&rx_prods)[i]);
1411
1412         mmiowb(); /* keep prod updates ordered */
1413
1414         DP(NETIF_MSG_RX_STATUS,
1415            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1416            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1417 }
1418
1419 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1420 {
1421         struct bnx2x *bp = fp->bp;
1422         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1423         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1424         int rx_pkt = 0;
1425
1426 #ifdef BNX2X_STOP_ON_ERROR
1427         if (unlikely(bp->panic))
1428                 return 0;
1429 #endif
1430
1431         /* The CQ "next element" is the same size as a regular element,
1432            so it is safe to handle it like one here */
1433         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1434         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1435                 hw_comp_cons++;
1436
1437         bd_cons = fp->rx_bd_cons;
1438         bd_prod = fp->rx_bd_prod;
1439         bd_prod_fw = bd_prod;
1440         sw_comp_cons = fp->rx_comp_cons;
1441         sw_comp_prod = fp->rx_comp_prod;
1442
1443         /* Memory barrier necessary as speculative reads of the rx
1444          * buffer can be ahead of the index in the status block
1445          */
1446         rmb();
1447
1448         DP(NETIF_MSG_RX_STATUS,
1449            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1450            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1451
1452         while (sw_comp_cons != hw_comp_cons) {
1453                 struct sw_rx_bd *rx_buf = NULL;
1454                 struct sk_buff *skb;
1455                 union eth_rx_cqe *cqe;
1456                 u8 cqe_fp_flags;
1457                 u16 len, pad;
1458
1459                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1460                 bd_prod = RX_BD(bd_prod);
1461                 bd_cons = RX_BD(bd_cons);
1462
1463                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1464                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1465
1466                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1467                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1468                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1469                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1470                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1471                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1472
1473                 /* is this a slowpath msg? */
1474                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1475                         bnx2x_sp_event(fp, cqe);
1476                         goto next_cqe;
1477
1478                 /* this is an rx packet */
1479                 } else {
1480                         rx_buf = &fp->rx_buf_ring[bd_cons];
1481                         skb = rx_buf->skb;
1482                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1483                         pad = cqe->fast_path_cqe.placement_offset;
1484
1485                         /* If CQE is marked both TPA_START and TPA_END
1486                            it is a non-TPA CQE */
1487                         if ((!fp->disable_tpa) &&
1488                             (TPA_TYPE(cqe_fp_flags) !=
1489                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1490                                 u16 queue = cqe->fast_path_cqe.queue_index;
1491
1492                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1493                                         DP(NETIF_MSG_RX_STATUS,
1494                                            "calling tpa_start on queue %d\n",
1495                                            queue);
1496
1497                                         bnx2x_tpa_start(fp, queue, skb,
1498                                                         bd_cons, bd_prod);
1499                                         goto next_rx;
1500                                 }
1501
1502                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1503                                         DP(NETIF_MSG_RX_STATUS,
1504                                            "calling tpa_stop on queue %d\n",
1505                                            queue);
1506
1507                                         if (!BNX2X_RX_SUM_FIX(cqe))
1508                                                 BNX2X_ERR("STOP on non-TCP "
1509                                                           "data\n");
1510
1511                                         /* This is the size of the linear
1512                                            data on this skb */
1513                                         len = le16_to_cpu(cqe->fast_path_cqe.
1514                                                                 len_on_bd);
1515                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1516                                                     len, cqe, comp_ring_cons);
1517 #ifdef BNX2X_STOP_ON_ERROR
1518                                         if (bp->panic)
1519                                                 return -EINVAL;
1520 #endif
1521
1522                                         bnx2x_update_sge_prod(fp,
1523                                                         &cqe->fast_path_cqe);
1524                                         goto next_cqe;
1525                                 }
1526                         }
1527
1528                         pci_dma_sync_single_for_device(bp->pdev,
1529                                         pci_unmap_addr(rx_buf, mapping),
1530                                                        pad + RX_COPY_THRESH,
1531                                                        PCI_DMA_FROMDEVICE);
1532                         prefetch(skb);
1533                         prefetch(((char *)(skb)) + 128);
1534
1535                         /* is this an error packet? */
1536                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1537                                 DP(NETIF_MSG_RX_ERR,
1538                                    "ERROR  flags %x  rx packet %u\n",
1539                                    cqe_fp_flags, sw_comp_cons);
1540                                 fp->eth_q_stats.rx_err_discard_pkt++;
1541                                 goto reuse_rx;
1542                         }
1543
1544                         /* Since we don't have a jumbo ring,
1545                          * copy small packets if mtu > 1500
1546                          */
1547                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1548                             (len <= RX_COPY_THRESH)) {
1549                                 struct sk_buff *new_skb;
1550
1551                                 new_skb = netdev_alloc_skb(bp->dev,
1552                                                            len + pad);
1553                                 if (new_skb == NULL) {
1554                                         DP(NETIF_MSG_RX_ERR,
1555                                            "ERROR  packet dropped "
1556                                            "because of alloc failure\n");
1557                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1558                                         goto reuse_rx;
1559                                 }
1560
1561                                 /* aligned copy */
1562                                 skb_copy_from_linear_data_offset(skb, pad,
1563                                                     new_skb->data + pad, len);
1564                                 skb_reserve(new_skb, pad);
1565                                 skb_put(new_skb, len);
1566
1567                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1568
1569                                 skb = new_skb;
1570
1571                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1572                                 pci_unmap_single(bp->pdev,
1573                                         pci_unmap_addr(rx_buf, mapping),
1574                                                  bp->rx_buf_size,
1575                                                  PCI_DMA_FROMDEVICE);
1576                                 skb_reserve(skb, pad);
1577                                 skb_put(skb, len);
1578
1579                         } else {
1580                                 DP(NETIF_MSG_RX_ERR,
1581                                    "ERROR  packet dropped because "
1582                                    "of alloc failure\n");
1583                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1584 reuse_rx:
1585                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1586                                 goto next_rx;
1587                         }
1588
1589                         skb->protocol = eth_type_trans(skb, bp->dev);
1590
1591                         skb->ip_summed = CHECKSUM_NONE;
1592                         if (bp->rx_csum) {
1593                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1594                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1595                                 else
1596                                         fp->eth_q_stats.hw_csum_err++;
1597                         }
1598                 }
1599
1600                 skb_record_rx_queue(skb, fp->index);
1601 #ifdef BCM_VLAN
1602                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1603                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1604                      PARSING_FLAGS_VLAN))
1605                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1606                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1607                 else
1608 #endif
1609                         netif_receive_skb(skb);
1610
1611
1612 next_rx:
1613                 rx_buf->skb = NULL;
1614
1615                 bd_cons = NEXT_RX_IDX(bd_cons);
1616                 bd_prod = NEXT_RX_IDX(bd_prod);
1617                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1618                 rx_pkt++;
1619 next_cqe:
1620                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1621                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1622
1623                 if (rx_pkt == budget)
1624                         break;
1625         } /* while */
1626
1627         fp->rx_bd_cons = bd_cons;
1628         fp->rx_bd_prod = bd_prod_fw;
1629         fp->rx_comp_cons = sw_comp_cons;
1630         fp->rx_comp_prod = sw_comp_prod;
1631
1632         /* Update producers */
1633         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1634                              fp->rx_sge_prod);
1635
1636         fp->rx_pkt += rx_pkt;
1637         fp->rx_calls++;
1638
1639         return rx_pkt;
1640 }
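
/*
 * Editor's note: bnx2x_rx_int() is designed to be driven from a NAPI
 * poll routine that honours the budget contract.  A minimal sketch of
 * such a caller (hypothetical name; whether to use napi_complete() or
 * netif_rx_complete() depends on the kernel version):
 */
static int bnx2x_example_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp =
			container_of(napi, struct bnx2x_fastpath, napi);
	int work_done = bnx2x_rx_int(fp, budget);

	/* re-enable interrupts only if the budget was not exhausted */
	if (work_done < budget)
		napi_complete(napi);
	return work_done;
}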
1641
1642 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1643 {
1644         struct bnx2x_fastpath *fp = fp_cookie;
1645         struct bnx2x *bp = fp->bp;
1646         int index = FP_IDX(fp);
1647
1648         /* Return here if interrupt is disabled */
1649         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1650                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1651                 return IRQ_HANDLED;
1652         }
1653
1654         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1655            index, FP_SB_ID(fp));
1656         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1657
1658 #ifdef BNX2X_STOP_ON_ERROR
1659         if (unlikely(bp->panic))
1660                 return IRQ_HANDLED;
1661 #endif
1662
1663         prefetch(fp->rx_cons_sb);
1664         prefetch(fp->tx_cons_sb);
1665         prefetch(&fp->status_blk->c_status_block.status_block_index);
1666         prefetch(&fp->status_blk->u_status_block.status_block_index);
1667
1668         napi_schedule(&bnx2x_fp(bp, index, napi));
1669
1670         return IRQ_HANDLED;
1671 }
1672
1673 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1674 {
1675         struct bnx2x *bp = netdev_priv(dev_instance);
1676         u16 status = bnx2x_ack_int(bp);
1677         u16 mask;
1678
1679         /* Return here if interrupt is shared and it's not for us */
1680         if (unlikely(status == 0)) {
1681                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1682                 return IRQ_NONE;
1683         }
1684         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1685
1686         /* Return here if interrupt is disabled */
1687         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1688                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1689                 return IRQ_HANDLED;
1690         }
1691
1692 #ifdef BNX2X_STOP_ON_ERROR
1693         if (unlikely(bp->panic))
1694                 return IRQ_HANDLED;
1695 #endif
1696
1697         mask = 0x2 << bp->fp[0].sb_id;
1698         if (status & mask) {
1699                 struct bnx2x_fastpath *fp = &bp->fp[0];
1700
1701                 prefetch(fp->rx_cons_sb);
1702                 prefetch(fp->tx_cons_sb);
1703                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1704                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1705
1706                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1707
1708                 status &= ~mask;
1709         }
1710
1711
1712         if (unlikely(status & 0x1)) {
1713                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1714
1715                 status &= ~0x1;
1716                 if (!status)
1717                         return IRQ_HANDLED;
1718         }
1719
1720         if (status)
1721                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1722                    status);
1723
1724         return IRQ_HANDLED;
1725 }
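
/*
 * Editor's sketch of the INTA status layout used above: bit 0 is the
 * slowpath indication and bit (sb_id + 1) belongs to a fastpath status
 * block, hence the "0x2 << sb_id" mask.  Hypothetical decoder:
 */
static inline int bnx2x_example_fp_pending(u16 status, u8 sb_id)
{
	return !!(status & (0x2 << sb_id));	/* fastpath sb_id pending? */
}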
1726
1727 /* end of fast path */
1728
1729 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1730
1731 /* Link */
1732
1733 /*
1734  * General service functions
1735  */
1736
1737 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1738 {
1739         u32 lock_status;
1740         u32 resource_bit = (1 << resource);
1741         int func = BP_FUNC(bp);
1742         u32 hw_lock_control_reg;
1743         int cnt;
1744
1745         /* Validating that the resource is within range */
1746         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1747                 DP(NETIF_MSG_HW,
1748                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1749                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1750                 return -EINVAL;
1751         }
1752
1753         if (func <= 5) {
1754                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1755         } else {
1756                 hw_lock_control_reg =
1757                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1758         }
1759
1760         /* Validating that the resource is not already taken */
1761         lock_status = REG_RD(bp, hw_lock_control_reg);
1762         if (lock_status & resource_bit) {
1763                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1764                    lock_status, resource_bit);
1765                 return -EEXIST;
1766         }
1767
1768         /* Try for 5 seconds, polling every 5ms */
1769         for (cnt = 0; cnt < 1000; cnt++) {
1770                 /* Try to acquire the lock */
1771                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1772                 lock_status = REG_RD(bp, hw_lock_control_reg);
1773                 if (lock_status & resource_bit)
1774                         return 0;
1775
1776                 msleep(5);
1777         }
1778         DP(NETIF_MSG_HW, "Timeout\n");
1779         return -EAGAIN;
1780 }
1781
1782 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1783 {
1784         u32 lock_status;
1785         u32 resource_bit = (1 << resource);
1786         int func = BP_FUNC(bp);
1787         u32 hw_lock_control_reg;
1788
1789         /* Validating that the resource is within range */
1790         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791                 DP(NETIF_MSG_HW,
1792                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794                 return -EINVAL;
1795         }
1796
1797         if (func <= 5) {
1798                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799         } else {
1800                 hw_lock_control_reg =
1801                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802         }
1803
1804         /* Validating that the resource is currently taken */
1805         lock_status = REG_RD(bp, hw_lock_control_reg);
1806         if (!(lock_status & resource_bit)) {
1807                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1808                    lock_status, resource_bit);
1809                 return -EFAULT;
1810         }
1811
1812         REG_WR(bp, hw_lock_control_reg, resource_bit);
1813         return 0;
1814 }
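
/*
 * Editor's usage note: the two functions above are meant to bracket
 * accesses to a shared hardware resource.  A minimal, hypothetical
 * sketch of the call shape:
 */
static inline int bnx2x_example_with_hw_lock(struct bnx2x *bp, u32 resource)
{
	int rc = bnx2x_acquire_hw_lock(bp, resource);

	if (rc)
		return rc;	/* -EINVAL, -EEXIST or -EAGAIN from above */

	/* ... touch the shared resource here ... */

	return bnx2x_release_hw_lock(bp, resource);
}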
1815
1816 /* HW Lock for shared dual port PHYs */
1817 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1818 {
1819         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1820
1821         mutex_lock(&bp->port.phy_mutex);
1822
1823         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1824             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1825                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1826 }
1827
1828 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1829 {
1830         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1831
1832         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1833             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1834                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1835
1836         mutex_unlock(&bp->port.phy_mutex);
1837 }
1838
1839 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1840 {
1841         /* The GPIO should be swapped if swap register is set and active */
1842         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1843                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1844         int gpio_shift = gpio_num +
1845                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1846         u32 gpio_mask = (1 << gpio_shift);
1847         u32 gpio_reg;
1848         int value;
1849
1850         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1852                 return -EINVAL;
1853         }
1854
1855         /* read GPIO value */
1856         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1857
1858         /* get the requested pin value */
1859         if ((gpio_reg & gpio_mask) == gpio_mask)
1860                 value = 1;
1861         else
1862                 value = 0;
1863
1864         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1865
1866         return value;
1867 }
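
/*
 * Editor's sketch of the pin-to-bit mapping used above: when the port
 * swap straps are active the pin belongs to the other port, and the
 * second port's pins are offset by MISC_REGISTERS_GPIO_PORT_SHIFT.
 * Hypothetical helper with the same arithmetic as bnx2x_get_gpio():
 */
static inline int bnx2x_example_gpio_shift(int gpio_num, int gpio_port)
{
	return gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
}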
1868
1869 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1870 {
1871         /* The GPIO should be swapped if swap register is set and active */
1872         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1873                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1874         int gpio_shift = gpio_num +
1875                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1876         u32 gpio_mask = (1 << gpio_shift);
1877         u32 gpio_reg;
1878
1879         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1880                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1881                 return -EINVAL;
1882         }
1883
1884         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1885         /* read GPIO and mask all but the float bits */
1886         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1887
1888         switch (mode) {
1889         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1890                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1891                    gpio_num, gpio_shift);
1892                 /* clear FLOAT and set CLR */
1893                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1894                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1895                 break;
1896
1897         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1898                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1899                    gpio_num, gpio_shift);
1900                 /* clear FLOAT and set SET */
1901                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1902                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1903                 break;
1904
1905         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1906                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1907                    gpio_num, gpio_shift);
1908                 /* set FLOAT */
1909                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1910                 break;
1911
1912         default:
1913                 break;
1914         }
1915
1916         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1917         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1918
1919         return 0;
1920 }
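
/*
 * Editor's usage note: callers drive a pin by picking one of the three
 * modes handled above.  For example, the SPIO5 attention handler later
 * in this file pulls GPIO 1 low to reset an external PHY; a hedged
 * sketch of that call shape:
 */
static inline void bnx2x_example_gpio_low(struct bnx2x *bp, u8 port)
{
	/* output-low clears FLOAT and sets the CLR latch for the pin */
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}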
1921
1922 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1923 {
1924         /* The GPIO should be swapped if swap register is set and active */
1925         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1926                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1927         int gpio_shift = gpio_num +
1928                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1929         u32 gpio_mask = (1 << gpio_shift);
1930         u32 gpio_reg;
1931
1932         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1933                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1934                 return -EINVAL;
1935         }
1936
1937         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1938         /* read GPIO int */
1939         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1940
1941         switch (mode) {
1942         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1943                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1944                                    "output low\n", gpio_num, gpio_shift);
1945                 /* clear SET and set CLR */
1946                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1947                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1948                 break;
1949
1950         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1951                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1952                                    "output high\n", gpio_num, gpio_shift);
1953                 /* clear CLR and set SET */
1954                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1955                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1956                 break;
1957
1958         default:
1959                 break;
1960         }
1961
1962         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1963         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1964
1965         return 0;
1966 }
1967
1968 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1969 {
1970         u32 spio_mask = (1 << spio_num);
1971         u32 spio_reg;
1972
1973         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1974             (spio_num > MISC_REGISTERS_SPIO_7)) {
1975                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1976                 return -EINVAL;
1977         }
1978
1979         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1980         /* read SPIO and mask all but the float bits */
1981         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1982
1983         switch (mode) {
1984         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1985                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1986                 /* clear FLOAT and set CLR */
1987                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1988                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1989                 break;
1990
1991         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1992                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1993                 /* clear FLOAT and set SET */
1994                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1995                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1996                 break;
1997
1998         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1999                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2000                 /* set FLOAT */
2001                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2002                 break;
2003
2004         default:
2005                 break;
2006         }
2007
2008         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2009         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2010
2011         return 0;
2012 }
2013
2014 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2015 {
2016         switch (bp->link_vars.ieee_fc &
2017                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2018         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2019                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2020                                           ADVERTISED_Pause);
2021                 break;
2022         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2023                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2024                                          ADVERTISED_Pause);
2025                 break;
2026         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2027                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2028                 break;
2029         default:
2030                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2031                                           ADVERTISED_Pause);
2032                 break;
2033         }
2034 }
2035
2036 static void bnx2x_link_report(struct bnx2x *bp)
2037 {
2038         if (bp->link_vars.link_up) {
2039                 if (bp->state == BNX2X_STATE_OPEN)
2040                         netif_carrier_on(bp->dev);
2041                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2042
2043                 printk("%d Mbps ", bp->link_vars.line_speed);
2044
2045                 if (bp->link_vars.duplex == DUPLEX_FULL)
2046                         printk("full duplex");
2047                 else
2048                         printk("half duplex");
2049
2050                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2051                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2052                                 printk(", receive ");
2053                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2054                                         printk("& transmit ");
2055                         } else {
2056                                 printk(", transmit ");
2057                         }
2058                         printk("flow control ON");
2059                 }
2060                 printk("\n");
2061
2062         } else { /* link_down */
2063                 netif_carrier_off(bp->dev);
2064                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2065         }
2066 }
2067
2068 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
2069 {
2070         if (!BP_NOMCP(bp)) {
2071                 u8 rc;
2072
2073                 /* Initialize link parameters structure variables */
2074                 /* It is recommended to turn off RX FC for jumbo frames
2075                    for better performance */
2076                 if (IS_E1HMF(bp))
2077                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2078                 else if (bp->dev->mtu > 5000)
2079                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2080                 else
2081                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2082
2083                 bnx2x_acquire_phy_lock(bp);
2084                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2085                 bnx2x_release_phy_lock(bp);
2086
2087                 bnx2x_calc_fc_adv(bp);
2088
2089                 if (bp->link_vars.link_up)
2090                         bnx2x_link_report(bp);
2091
2092
2093                 return rc;
2094         }
2095         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2096         return -EINVAL;
2097 }
2098
2099 static void bnx2x_link_set(struct bnx2x *bp)
2100 {
2101         if (!BP_NOMCP(bp)) {
2102                 bnx2x_acquire_phy_lock(bp);
2103                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2104                 bnx2x_release_phy_lock(bp);
2105
2106                 bnx2x_calc_fc_adv(bp);
2107         } else
2108                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2109 }
2110
2111 static void bnx2x__link_reset(struct bnx2x *bp)
2112 {
2113         if (!BP_NOMCP(bp)) {
2114                 bnx2x_acquire_phy_lock(bp);
2115                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2116                 bnx2x_release_phy_lock(bp);
2117         } else
2118                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2119 }
2120
2121 static u8 bnx2x_link_test(struct bnx2x *bp)
2122 {
2123         u8 rc;
2124
2125         bnx2x_acquire_phy_lock(bp);
2126         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2127         bnx2x_release_phy_lock(bp);
2128
2129         return rc;
2130 }
2131
2132 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2133 {
2134         u32 r_param = bp->link_vars.line_speed / 8;
2135         u32 fair_periodic_timeout_usec;
2136         u32 t_fair;
2137
2138         memset(&(bp->cmng.rs_vars), 0,
2139                sizeof(struct rate_shaping_vars_per_port));
2140         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2141
2142         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2143         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2144
2145         /* this is the threshold below which no timer arming will occur;
2146            the 1.25 coefficient makes the threshold a little bigger than
2147            the real time, to compensate for timer inaccuracy */
2148         bp->cmng.rs_vars.rs_threshold =
2149                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2150
2151         /* resolution of fairness timer */
2152         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2153         /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2154         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2155
2156         /* this is the threshold below which we won't arm the timer anymore */
2157         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2158
2159         /* we multiply by 1e3/8 to get bytes/msec.
2160            We don't want the credits to exceed
2161            t_fair*FAIR_MEM (the algorithm resolution) */
2162         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2163         /* since each tick is 4 usec */
2164         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2165 }
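
/*
 * Editor's worked example for the timing math above, assuming nothing
 * beyond what the function itself uses: at 10G, line_speed is 10000
 * Mbps, so r_param = 10000/8 = 1250 bytes/usec and t_fair =
 * T_FAIR_COEF/10000, which matches the "for 10G it is 1000 usec"
 * comment.  Hypothetical helper:
 */
static inline u32 bnx2x_example_t_fair(u32 line_speed)
{
	/* e.g. 10000 Mbps -> T_FAIR_COEF/10000 usec */
	return T_FAIR_COEF / line_speed;
}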
2166
2167 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2168 {
2169         struct rate_shaping_vars_per_vn m_rs_vn;
2170         struct fairness_vars_per_vn m_fair_vn;
2171         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2172         u16 vn_min_rate, vn_max_rate;
2173         int i;
2174
2175         /* If function is hidden - set min and max to zeroes */
2176         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2177                 vn_min_rate = 0;
2178                 vn_max_rate = 0;
2179
2180         } else {
2181                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2182                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2183                 /* If fairness is enabled (not all min rates are zero) and
2184                    the current min rate is zero, set it to 1.
2185                    This is a requirement of the algorithm. */
2186                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2187                         vn_min_rate = DEF_MIN_RATE;
2188                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2189                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2190         }
2191
2192         DP(NETIF_MSG_IFUP,
2193            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2194            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2195
2196         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2197         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2198
2199         /* global vn counter - maximal Mbps for this vn */
2200         m_rs_vn.vn_counter.rate = vn_max_rate;
2201
2202         /* quota - number of bytes transmitted in this period */
2203         m_rs_vn.vn_counter.quota =
2204                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2205
2206         if (bp->vn_weight_sum) {
2207                 /* credit for each period of the fairness algorithm:
2208                    number of bytes in T_FAIR (the vns share the port rate).
2209                    vn_weight_sum should not be larger than 10000, thus
2210                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2211                    than zero */
2212                 m_fair_vn.vn_credit_delta =
2213                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2214                                                  (8 * bp->vn_weight_sum))),
2215                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2216                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2217                    m_fair_vn.vn_credit_delta);
2218         }
2219
2220         /* Store it to internal memory */
2221         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2222                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2223                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2224                        ((u32 *)(&m_rs_vn))[i]);
2225
2226         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2227                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2228                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2229                        ((u32 *)(&m_fair_vn))[i]);
2230 }
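
/*
 * Editor's sketch of the shmem bandwidth encoding decoded above: the
 * min/max fields in vn_cfg appear to be stored in units of 100 Mbps,
 * hence the "* 100" scaling.  Hypothetical decoder with the same math:
 */
static inline u16 bnx2x_example_vn_max_rate(u32 vn_cfg)
{
	return ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
		FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
}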
2231
2232
2233 /* This function is called upon link interrupt */
2234 static void bnx2x_link_attn(struct bnx2x *bp)
2235 {
2236         /* Make sure that we are synced with the current statistics */
2237         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2238
2239         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2240
2241         if (bp->link_vars.link_up) {
2242
2243                 /* dropless flow control */
2244                 if (CHIP_IS_E1H(bp)) {
2245                         int port = BP_PORT(bp);
2246                         u32 pause_enabled = 0;
2247
2248                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2249                                 pause_enabled = 1;
2250
2251                         REG_WR(bp, BAR_USTRORM_INTMEM +
2252                                USTORM_PAUSE_ENABLED_OFFSET(port),
2253                                pause_enabled);
2254                 }
2255
2256                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2257                         struct host_port_stats *pstats;
2258
2259                         pstats = bnx2x_sp(bp, port_stats);
2260                         /* reset old bmac stats */
2261                         memset(&(pstats->mac_stx[0]), 0,
2262                                sizeof(struct mac_stx));
2263                 }
2264                 if ((bp->state == BNX2X_STATE_OPEN) ||
2265                     (bp->state == BNX2X_STATE_DISABLED))
2266                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2267         }
2268
2269         /* indicate link status */
2270         bnx2x_link_report(bp);
2271
2272         if (IS_E1HMF(bp)) {
2273                 int port = BP_PORT(bp);
2274                 int func;
2275                 int vn;
2276
2277                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2278                         if (vn == BP_E1HVN(bp))
2279                                 continue;
2280
2281                         func = ((vn << 1) | port);
2282
2283                         /* Set the attention towards other drivers
2284                            on the same port */
2285                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2286                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2287                 }
2288
2289                 if (bp->link_vars.link_up) {
2290                         int i;
2291
2292                         /* Init rate shaping and fairness contexts */
2293                         bnx2x_init_port_minmax(bp);
2294
2295                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2296                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2297
2298                         /* Store it to internal memory */
2299                         for (i = 0;
2300                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2301                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2302                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2303                                        ((u32 *)(&bp->cmng))[i]);
2304                 }
2305         }
2306 }
2307
2308 static void bnx2x__link_status_update(struct bnx2x *bp)
2309 {
2310         if (bp->state != BNX2X_STATE_OPEN)
2311                 return;
2312
2313         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2314
2315         if (bp->link_vars.link_up)
2316                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2317         else
2318                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2319
2320         /* indicate link status */
2321         bnx2x_link_report(bp);
2322 }
2323
2324 static void bnx2x_pmf_update(struct bnx2x *bp)
2325 {
2326         int port = BP_PORT(bp);
2327         u32 val;
2328
2329         bp->port.pmf = 1;
2330         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2331
2332         /* enable nig attention */
2333         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2334         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2335         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2336
2337         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2338 }
2339
2340 /* end of Link */
2341
2342 /* slow path */
2343
2344 /*
2345  * General service functions
2346  */
2347
2348 /* the slow path queue is odd since completions arrive on the fastpath ring */
2349 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2350                          u32 data_hi, u32 data_lo, int common)
2351 {
2352         int func = BP_FUNC(bp);
2353
2354         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2355            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2356            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2357            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2358            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2359
2360 #ifdef BNX2X_STOP_ON_ERROR
2361         if (unlikely(bp->panic))
2362                 return -EIO;
2363 #endif
2364
2365         spin_lock_bh(&bp->spq_lock);
2366
2367         if (!bp->spq_left) {
2368                 BNX2X_ERR("BUG! SPQ ring full!\n");
2369                 spin_unlock_bh(&bp->spq_lock);
2370                 bnx2x_panic();
2371                 return -EBUSY;
2372         }
2373
2374         /* CID needs the port number to be encoded in it */
2375         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2376                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2377                                      HW_CID(bp, cid)));
2378         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2379         if (common)
2380                 bp->spq_prod_bd->hdr.type |=
2381                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2382
2383         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2384         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2385
2386         bp->spq_left--;
2387
2388         if (bp->spq_prod_bd == bp->spq_last_bd) {
2389                 bp->spq_prod_bd = bp->spq;
2390                 bp->spq_prod_idx = 0;
2391                 DP(NETIF_MSG_TIMER, "end of spq\n");
2392
2393         } else {
2394                 bp->spq_prod_bd++;
2395                 bp->spq_prod_idx++;
2396         }
2397
2398         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2399                bp->spq_prod_idx);
2400
2401         spin_unlock_bh(&bp->spq_lock);
2402         return 0;
2403 }
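
/*
 * Editor's sketch: the producer wrap handled at the end of
 * bnx2x_sp_post() is the classic ring pattern - advance one element,
 * reset to the base once the last BD is used.  Generic, hypothetical
 * form:
 */
static inline void bnx2x_example_ring_advance(void **prod, void *base,
					      void *last, size_t elem)
{
	if (*prod == last)
		*prod = base;			/* wrap to ring start */
	else
		*prod = (char *)*prod + elem;	/* next element */
}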
2404
2405 /* acquire split MCP access lock register */
2406 static int bnx2x_acquire_alr(struct bnx2x *bp)
2407 {
2408         u32 i, j, val;
2409         int rc = 0;
2410
2411         might_sleep();
2412         i = 100;
2413         for (j = 0; j < i*10; j++) {
2414                 val = (1UL << 31);
2415                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2416                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2417                 if (val & (1L << 31))
2418                         break;
2419
2420                 msleep(5);
2421         }
2422         if (!(val & (1L << 31))) {
2423                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2424                 rc = -EBUSY;
2425         }
2426
2427         return rc;
2428 }
2429
2430 /* release split MCP access lock register */
2431 static void bnx2x_release_alr(struct bnx2x *bp)
2432 {
2433         u32 val = 0;
2434
2435         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2436 }
2437
2438 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2439 {
2440         struct host_def_status_block *def_sb = bp->def_status_blk;
2441         u16 rc = 0;
2442
2443         barrier(); /* status block is written to by the chip */
2444         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2445                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2446                 rc |= 1;
2447         }
2448         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2449                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2450                 rc |= 2;
2451         }
2452         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2453                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2454                 rc |= 4;
2455         }
2456         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2457                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2458                 rc |= 8;
2459         }
2460         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2461                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2462                 rc |= 16;
2463         }
2464         return rc;
2465 }
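
/*
 * Editor's note on the return value above: it is a bitmask of which
 * default status block sections advanced - 1 attention, 2 CSTORM,
 * 4 USTORM, 8 XSTORM, 16 TSTORM.  Hypothetical test for the attention
 * bit:
 */
static inline int bnx2x_example_dsb_attn_changed(u16 rc)
{
	return rc & 1;	/* attention bits index moved */
}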
2466
2467 /*
2468  * slow path service functions
2469  */
2470
2471 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2472 {
2473         int port = BP_PORT(bp);
2474         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2475                        COMMAND_REG_ATTN_BITS_SET);
2476         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2477                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2478         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2479                                        NIG_REG_MASK_INTERRUPT_PORT0;
2480         u32 aeu_mask;
2481         u32 nig_mask = 0;
2482
2483         if (bp->attn_state & asserted)
2484                 BNX2X_ERR("IGU ERROR\n");
2485
2486         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2487         aeu_mask = REG_RD(bp, aeu_addr);
2488
2489         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2490            aeu_mask, asserted);
2491         aeu_mask &= ~(asserted & 0xff);
2492         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2493
2494         REG_WR(bp, aeu_addr, aeu_mask);
2495         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2496
2497         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2498         bp->attn_state |= asserted;
2499         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2500
2501         if (asserted & ATTN_HARD_WIRED_MASK) {
2502                 if (asserted & ATTN_NIG_FOR_FUNC) {
2503
2504                         bnx2x_acquire_phy_lock(bp);
2505
2506                         /* save nig interrupt mask */
2507                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2508                         REG_WR(bp, nig_int_mask_addr, 0);
2509
2510                         bnx2x_link_attn(bp);
2511
2512                         /* handle unicore attn? */
2513                 }
2514                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2515                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2516
2517                 if (asserted & GPIO_2_FUNC)
2518                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2519
2520                 if (asserted & GPIO_3_FUNC)
2521                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2522
2523                 if (asserted & GPIO_4_FUNC)
2524                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2525
2526                 if (port == 0) {
2527                         if (asserted & ATTN_GENERAL_ATTN_1) {
2528                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2529                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2530                         }
2531                         if (asserted & ATTN_GENERAL_ATTN_2) {
2532                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2533                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2534                         }
2535                         if (asserted & ATTN_GENERAL_ATTN_3) {
2536                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2537                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2538                         }
2539                 } else {
2540                         if (asserted & ATTN_GENERAL_ATTN_4) {
2541                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2542                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2543                         }
2544                         if (asserted & ATTN_GENERAL_ATTN_5) {
2545                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2546                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2547                         }
2548                         if (asserted & ATTN_GENERAL_ATTN_6) {
2549                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2550                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2551                         }
2552                 }
2553
2554         } /* if hardwired */
2555
2556         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2557            asserted, hc_addr);
2558         REG_WR(bp, hc_addr, asserted);
2559
2560         /* now set back the mask */
2561         if (asserted & ATTN_NIG_FOR_FUNC) {
2562                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2563                 bnx2x_release_phy_lock(bp);
2564         }
2565 }
2566
2567 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2568 {
2569         int port = BP_PORT(bp);
2570         int reg_offset;
2571         u32 val;
2572
2573         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2574                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2575
2576         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2577
2578                 val = REG_RD(bp, reg_offset);
2579                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2580                 REG_WR(bp, reg_offset, val);
2581
2582                 BNX2X_ERR("SPIO5 hw attention\n");
2583
2584                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2585                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2586                         /* Fan failure attention */
2587
2588                         /* The PHY reset is controlled by GPIO 1 */
2589                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2590                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2591                         /* Low power mode is controlled by GPIO 2 */
2592                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2593                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2594                         /* mark the failure */
2595                         bp->link_params.ext_phy_config &=
2596                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2597                         bp->link_params.ext_phy_config |=
2598                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2599                         SHMEM_WR(bp,
2600                                  dev_info.port_hw_config[port].
2601                                                         external_phy_config,
2602                                  bp->link_params.ext_phy_config);
2603                         /* log the failure */
2604                         printk(KERN_ERR PFX "Fan Failure on Network"
2605                                " Controller %s has caused the driver to"
2606                                " shutdown the card to prevent permanent"
2607                                " damage.  Please contact Dell Support for"
2608                                " assistance\n", bp->dev->name);
2609                         break;
2610
2611                 default:
2612                         break;
2613                 }
2614         }
2615
2616         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2617
2618                 val = REG_RD(bp, reg_offset);
2619                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2620                 REG_WR(bp, reg_offset, val);
2621
2622                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2623                           (attn & HW_INTERRUT_ASSERT_SET_0));
2624                 bnx2x_panic();
2625         }
2626 }
2627
2628 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2629 {
2630         u32 val;
2631
2632         if (attn & BNX2X_DOORQ_ASSERT) {
2633
2634                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2635                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2636                 /* DORQ discard attention */
2637                 if (val & 0x2)
2638                         BNX2X_ERR("FATAL error from DORQ\n");
2639         }
2640
2641         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2642
2643                 int port = BP_PORT(bp);
2644                 int reg_offset;
2645
2646                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2647                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2648
2649                 val = REG_RD(bp, reg_offset);
2650                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2651                 REG_WR(bp, reg_offset, val);
2652
2653                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2654                           (attn & HW_INTERRUT_ASSERT_SET_1));
2655                 bnx2x_panic();
2656         }
2657 }
2658
2659 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2660 {
2661         u32 val;
2662
2663         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2664
2665                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2666                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2667                 /* CFC error attention */
2668                 if (val & 0x2)
2669                         BNX2X_ERR("FATAL error from CFC\n");
2670         }
2671
2672         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2673
2674                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2675                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2676                 /* RQ_USDMDP_FIFO_OVERFLOW */
2677                 if (val & 0x18000)
2678                         BNX2X_ERR("FATAL error from PXP\n");
2679         }
2680
2681         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2682
2683                 int port = BP_PORT(bp);
2684                 int reg_offset;
2685
2686                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2687                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2688
2689                 val = REG_RD(bp, reg_offset);
2690                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2691                 REG_WR(bp, reg_offset, val);
2692
2693                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2694                           (attn & HW_INTERRUT_ASSERT_SET_2));
2695                 bnx2x_panic();
2696         }
2697 }
2698
2699 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2700 {
2701         u32 val;
2702
2703         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2704
2705                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2706                         int func = BP_FUNC(bp);
2707
2708                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2709                         bnx2x__link_status_update(bp);
2710                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2711                                                         DRV_STATUS_PMF)
2712                                 bnx2x_pmf_update(bp);
2713
2714                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2715
2716                         BNX2X_ERR("MC assert!\n");
2717                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2718                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2719                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2720                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2721                         bnx2x_panic();
2722
2723                 } else if (attn & BNX2X_MCP_ASSERT) {
2724
2725                         BNX2X_ERR("MCP assert!\n");
2726                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2727                         bnx2x_fw_dump(bp);
2728
2729                 } else
2730                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2731         }
2732
2733         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2734                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2735                 if (attn & BNX2X_GRC_TIMEOUT) {
2736                         val = CHIP_IS_E1H(bp) ?
2737                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2738                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2739                 }
2740                 if (attn & BNX2X_GRC_RSV) {
2741                         val = CHIP_IS_E1H(bp) ?
2742                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2743                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2744                 }
2745                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2746         }
2747 }
2748
2749 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2750 {
2751         struct attn_route attn;
2752         struct attn_route group_mask;
2753         int port = BP_PORT(bp);
2754         int index;
2755         u32 reg_addr;
2756         u32 val;
2757         u32 aeu_mask;
2758
2759         /* need to take the HW lock because the MCP or the other port
2760            might also try to handle this event */
2761         bnx2x_acquire_alr(bp);
2762
2763         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2764         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2765         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2766         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2767         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2768            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2769
2770         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2771                 if (deasserted & (1 << index)) {
2772                         group_mask = bp->attn_group[index];
2773
2774                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2775                            index, group_mask.sig[0], group_mask.sig[1],
2776                            group_mask.sig[2], group_mask.sig[3]);
2777
2778                         bnx2x_attn_int_deasserted3(bp,
2779                                         attn.sig[3] & group_mask.sig[3]);
2780                         bnx2x_attn_int_deasserted1(bp,
2781                                         attn.sig[1] & group_mask.sig[1]);
2782                         bnx2x_attn_int_deasserted2(bp,
2783                                         attn.sig[2] & group_mask.sig[2]);
2784                         bnx2x_attn_int_deasserted0(bp,
2785                                         attn.sig[0] & group_mask.sig[0]);
2786
2787                         if ((attn.sig[0] & group_mask.sig[0] &
2788                                                 HW_PRTY_ASSERT_SET_0) ||
2789                             (attn.sig[1] & group_mask.sig[1] &
2790                                                 HW_PRTY_ASSERT_SET_1) ||
2791                             (attn.sig[2] & group_mask.sig[2] &
2792                                                 HW_PRTY_ASSERT_SET_2))
2793                                 BNX2X_ERR("FATAL HW block parity attention\n");
2794                 }
2795         }
2796
2797         bnx2x_release_alr(bp);
2798
2799         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2800
2801         val = ~deasserted;
2802         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2803            val, reg_addr);
2804         REG_WR(bp, reg_addr, val);
2805
2806         if (~bp->attn_state & deasserted)
2807                 BNX2X_ERR("IGU ERROR\n");
2808
2809         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2810                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2811
2812         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2813         aeu_mask = REG_RD(bp, reg_addr);
2814
2815         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2816            aeu_mask, deasserted);
2817         aeu_mask |= (deasserted & 0xff);
2818         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2819
2820         REG_WR(bp, reg_addr, aeu_mask);
2821         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2822
2823         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2824         bp->attn_state &= ~deasserted;
2825         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2826 }
2827
2828 static void bnx2x_attn_int(struct bnx2x *bp)
2829 {
2830         /* read local copy of bits */
2831         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2832                                                                 attn_bits);
2833         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2834                                                                 attn_bits_ack);
2835         u32 attn_state = bp->attn_state;
2836
2837         /* look for changed bits */
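        /* a bit is newly asserted if it is set in attn_bits but not
         * yet acked or recorded in attn_state; it is newly deasserted
         * if it cleared in attn_bits while still acked and recorded */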
2838         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2839         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2840
2841         DP(NETIF_MSG_HW,
2842            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2843            attn_bits, attn_ack, asserted, deasserted);
2844
2845         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2846                 BNX2X_ERR("BAD attention state\n");
2847
2848         /* handle bits that were raised */
2849         if (asserted)
2850                 bnx2x_attn_int_asserted(bp, asserted);
2851
2852         if (deasserted)
2853                 bnx2x_attn_int_deasserted(bp, deasserted);
2854 }
2855
2856 static void bnx2x_sp_task(struct work_struct *work)
2857 {
2858         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2859         u16 status;
2860
2861
2862         /* Return here if interrupt is disabled */
2863         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2864                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2865                 return;
2866         }
2867
2868         status = bnx2x_update_dsb_idx(bp);
2869 /*      if (status == 0)                                     */
2870 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2871
2872         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2873
2874         /* HW attentions */
2875         if (status & 0x1)
2876                 bnx2x_attn_int(bp);
2877
2878         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2879                      IGU_INT_NOP, 1);
2880         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2881                      IGU_INT_NOP, 1);
2882         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2883                      IGU_INT_NOP, 1);
2884         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2885                      IGU_INT_NOP, 1);
2886         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2887                      IGU_INT_ENABLE, 1);
2888
2889 }
2890
2891 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2892 {
2893         struct net_device *dev = dev_instance;
2894         struct bnx2x *bp = netdev_priv(dev);
2895
2896         /* Return here if interrupt is disabled */
2897         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2898                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2899                 return IRQ_HANDLED;
2900         }
2901
2902         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2903
2904 #ifdef BNX2X_STOP_ON_ERROR
2905         if (unlikely(bp->panic))
2906                 return IRQ_HANDLED;
2907 #endif
2908
2909         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2910
2911         return IRQ_HANDLED;
2912 }
2913
2914 /* end of slow path */
2915
2916 /* Statistics */
2917
2918 /****************************************************************************
2919 * Macros
2920 ****************************************************************************/
2921
2922 /* sum[hi:lo] += add[hi:lo] */
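/* A carry out of the low dword shows up as unsigned wraparound: after
 * s_lo += a_lo, (s_lo < a_lo) holds iff the add overflowed 32 bits.
 * E.g. sum = 0x00000000ffffffff plus add = 1 yields
 * sum = 0x0000000100000000. */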
2923 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2924         do { \
2925                 s_lo += a_lo; \
2926                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2927         } while (0)
2928
2929 /* difference = minuend - subtrahend */
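/* If the minuend is smaller than the subtrahend (e.g. the HW counter
 * was reset between samples) the result clamps to zero rather than
 * underflowing. Worked example: m = 0x0000000100000000, s = 1:
 * m_lo(0) < s_lo(1) and d_hi = 1 > 0, so one is borrowed from the
 * high dword and d_lo = 0 + (0xffffffff - 1) + 1 = 0xffffffff,
 * i.e. exactly m - s. */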
2930 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2931         do { \
2932                 if (m_lo < s_lo) { \
2933                         /* underflow */ \
2934                         d_hi = m_hi - s_hi; \
2935                         if (d_hi > 0) { \
2936                                 /* we can borrow 1 */ \
2937                                 d_hi--; \
2938                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2939                         } else { \
2940                                 /* m_hi <= s_hi */ \
2941                                 d_hi = 0; \
2942                                 d_lo = 0; \
2943                         } \
2944                 } else { \
2945                         /* m_lo >= s_lo */ \
2946                         if (m_hi < s_hi) { \
2947                                 d_hi = 0; \
2948                                 d_lo = 0; \
2949                         } else { \
2950                                 /* m_hi >= s_hi */ \
2951                                 d_hi = m_hi - s_hi; \
2952                                 d_lo = m_lo - s_lo; \
2953                         } \
2954                 } \
2955         } while (0)
2956
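/* 'new' points at the freshly DMAed MAC statistics block: mac_stx[0]
 * caches the raw snapshot from the previous read, and the delta since
 * that snapshot is folded into the running totals in mac_stx[1]. */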
2957 #define UPDATE_STAT64(s, t) \
2958         do { \
2959                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2960                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2961                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2962                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2963                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2964                        pstats->mac_stx[1].t##_lo, diff.lo); \
2965         } while (0)
2966
2967 #define UPDATE_STAT64_NIG(s, t) \
2968         do { \
2969                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2970                         diff.lo, new->s##_lo, old->s##_lo); \
2971                 ADD_64(estats->t##_hi, diff.hi, \
2972                        estats->t##_lo, diff.lo); \
2973         } while (0)
2974
2975 /* sum[hi:lo] += add */
2976 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2977         do { \
2978                 s_lo += a; \
2979                 s_hi += (s_lo < a) ? 1 : 0; \
2980         } while (0)
2981
2982 #define UPDATE_EXTEND_STAT(s) \
2983         do { \
2984                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2985                               pstats->mac_stx[1].s##_lo, \
2986                               new->s); \
2987         } while (0)
2988
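/* The UPDATE_EXTEND_{T,U,X}STAT helpers below widen 32-bit storm
 * counters: the delta is computed in u32 arithmetic, so a single
 * wraparound of the HW counter between samples is still accounted
 * for correctly before being added to the 64-bit qstats field. */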
2989 #define UPDATE_EXTEND_TSTAT(s, t) \
2990         do { \
2991                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2992                 old_tclient->s = le32_to_cpu(tclient->s); \
2993                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2994         } while (0)
2995
2996 #define UPDATE_EXTEND_USTAT(s, t) \
2997         do { \
2998                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
2999                 old_uclient->s = uclient->s; \
3000                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3001         } while (0)
3002
3003 #define UPDATE_EXTEND_XSTAT(s, t) \
3004         do { \
3005                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3006                 old_xclient->s = le32_to_cpu(xclient->s); \
3007                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3008         } while (0)
3009
3010 /* minuend -= subtrahend */
3011 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3012         do { \
3013                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3014         } while (0)
3015
3016 /* minuend[hi:lo] -= subtrahend */
3017 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3018         do { \
3019                 SUB_64(m_hi, 0, m_lo, s); \
3020         } while (0)
3021
3022 #define SUB_EXTEND_USTAT(s, t) \
3023         do { \
3024                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3025                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3026         } while (0)
3027
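/* Minimal usage sketch of the 64-bit helpers (illustrative only):
 *
 *	u32 total_hi = 0, total_lo = 0xfffffff0;
 *
 *	ADD_EXTEND_64(total_hi, total_lo, 0x20);
 *
 * total_lo wraps to 0x10, the carry is detected and propagated, and
 * the pair now holds 0x0000000100000010.
 */
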
3028 /*
3029  * General service functions
3030  */
3031
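/* Fold a {hi,lo} pair of consecutive u32s (hiref points at the high
 * word) into a long: the full 64-bit value on 64-bit kernels, only
 * the low dword on 32-bit ones, matching the width of the unsigned
 * long counters in struct net_device_stats. */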
3032 static inline long bnx2x_hilo(u32 *hiref)
3033 {
3034         u32 lo = *(hiref + 1);
3035 #if (BITS_PER_LONG == 64)
3036         u32 hi = *hiref;
3037
3038         return HILO_U64(hi, lo);
3039 #else
3040         return lo;
3041 #endif
3042 }
3043
3044 /*
3045  * Init service functions
3046  */
3047
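/* Post a statistics query ramrod to the FW carrying the current driver
 * counter and a bitmap of the client IDs to collect for; the ramrod
 * data is passed as two u32 halves. The stats ramrod has a dedicated
 * slot on the slow-path queue, so spq_left is given back on success. */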
3048 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3049 {
3050         if (!bp->stats_pending) {
3051                 struct eth_query_ramrod_data ramrod_data = {0};
3052                 int i, rc;
3053
3054                 ramrod_data.drv_counter = bp->stats_counter++;
3055                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3056                 for_each_queue(bp, i)
3057                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3058
3059                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3060                                    ((u32 *)&ramrod_data)[1],
3061                                    ((u32 *)&ramrod_data)[0], 0);
3062                 if (rc == 0) {
3063                         /* stats ramrod has its own slot on the spq */
3064                         bp->spq_left++;
3065                         bp->stats_pending = 1;
3066                 }
3067         }
3068 }
3069
3070 static void bnx2x_stats_init(struct bnx2x *bp)
3071 {
3072         int port = BP_PORT(bp);
3073         int i;
3074
3075         bp->stats_pending = 0;
3076         bp->executer_idx = 0;
3077         bp->stats_counter = 0;
3078
3079         /* port stats */
3080         if (!BP_NOMCP(bp))
3081                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3082         else
3083                 bp->port.port_stx = 0;
3084         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3085
3086         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3087         bp->port.old_nig_stats.brb_discard =
3088                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3089         bp->port.old_nig_stats.brb_truncate =
3090                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3091         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3092                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3093         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3094                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3095
3096         /* function stats */
3097         for_each_queue(bp, i) {
3098                 struct bnx2x_fastpath *fp = &bp->fp[i];
3099
3100                 memset(&fp->old_tclient, 0,
3101                        sizeof(struct tstorm_per_client_stats));
3102                 memset(&fp->old_uclient, 0,
3103                        sizeof(struct ustorm_per_client_stats));
3104                 memset(&fp->old_xclient, 0,
3105                        sizeof(struct xstorm_per_client_stats));
3106                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3107         }
3108
3109         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3110         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3111
3112         bp->stats_state = STATS_STATE_DISABLED;
3113         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3114                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3115 }
3116
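/* Kick off the DMAE transfers set up by the callers. When several
 * commands were queued (executer_idx != 0), a 'loader' command is
 * built: in outline, it DMAs the queued commands from host memory
 * into the DMAE command memory, and each completion writes a GO
 * register that triggers the next command, so the whole chain runs
 * without further CPU involvement. */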
3117 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3118 {
3119         struct dmae_command *dmae = &bp->stats_dmae;
3120         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3121
3122         *stats_comp = DMAE_COMP_VAL;
3123         if (CHIP_REV_IS_SLOW(bp))
3124                 return;
3125
3126         /* loader */
3127         if (bp->executer_idx) {
3128                 int loader_idx = PMF_DMAE_C(bp);
3129
3130                 memset(dmae, 0, sizeof(struct dmae_command));
3131
3132                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3133                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3134                                 DMAE_CMD_DST_RESET |
3135 #ifdef __BIG_ENDIAN
3136                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3137 #else
3138                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3139 #endif
3140                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3141                                                DMAE_CMD_PORT_0) |
3142                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3143                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3144                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3145                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3146                                      sizeof(struct dmae_command) *
3147                                      (loader_idx + 1)) >> 2;
3148                 dmae->dst_addr_hi = 0;
3149                 dmae->len = sizeof(struct dmae_command) >> 2;
3150                 if (CHIP_IS_E1(bp))
3151                         dmae->len--;
3152                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3153                 dmae->comp_addr_hi = 0;
3154                 dmae->comp_val = 1;
3155
3156                 *stats_comp = 0;
3157                 bnx2x_post_dmae(bp, dmae, loader_idx);
3158
3159         } else if (bp->func_stx) {
3160                 *stats_comp = 0;
3161                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3162         }
3163 }
3164
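/* Poll the DMAE completion word for DMAE_COMP_VAL, sleeping 1 ms
 * between checks, for up to 10 tries; logs an error on timeout but
 * still reports completion to the caller. */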
3165 static int bnx2x_stats_comp(struct bnx2x *bp)
3166 {
3167         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3168         int cnt = 10;
3169
3170         might_sleep();
3171         while (*stats_comp != DMAE_COMP_VAL) {
3172                 if (!cnt) {
3173                         BNX2X_ERR("timeout waiting for stats to finish\n");
3174                         break;
3175                 }
3176                 cnt--;
3177                 msleep(1);
3178         }
3179         return 1;
3180 }
3181
3182 /*
3183  * Statistics service functions
3184  */
3185
3186 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3187 {
3188         struct dmae_command *dmae;
3189         u32 opcode;
3190         int loader_idx = PMF_DMAE_C(bp);
3191         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3192
3193         /* sanity */
3194         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3195                 BNX2X_ERR("BUG!\n");
3196                 return;
3197         }
3198
3199         bp->executer_idx = 0;
3200
3201         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3202                   DMAE_CMD_C_ENABLE |
3203                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3204 #ifdef __BIG_ENDIAN
3205                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3206 #else
3207                   DMAE_CMD_ENDIANITY_DW_SWAP |
3208 #endif
3209                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3210                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3211
3212         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3213         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3214         dmae->src_addr_lo = bp->port.port_stx >> 2;
3215         dmae->src_addr_hi = 0;
3216         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3217         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3218         dmae->len = DMAE_LEN32_RD_MAX;
3219         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3220         dmae->comp_addr_hi = 0;
3221         dmae->comp_val = 1;
3222
3223         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3224         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3225         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3226         dmae->src_addr_hi = 0;
3227         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3228                                    DMAE_LEN32_RD_MAX * 4);
3229         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3230                                    DMAE_LEN32_RD_MAX * 4);
3231         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3232         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3233         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3234         dmae->comp_val = DMAE_COMP_VAL;
3235
3236         *stats_comp = 0;
3237         bnx2x_hw_stats_post(bp);
3238         bnx2x_stats_comp(bp);
3239 }
3240
3241 static void bnx2x_port_stats_init(struct bnx2x *bp)
3242 {
3243         struct dmae_command *dmae;
3244         int port = BP_PORT(bp);
3245         int vn = BP_E1HVN(bp);
3246         u32 opcode;
3247         int loader_idx = PMF_DMAE_C(bp);
3248         u32 mac_addr;
3249         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3250
3251         /* sanity */
3252         if (!bp->link_vars.link_up || !bp->port.pmf) {
3253                 BNX2X_ERR("BUG!\n");
3254                 return;
3255         }
3256
3257         bp->executer_idx = 0;
3258
3259         /* MCP */
3260         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3261                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3262                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3263 #ifdef __BIG_ENDIAN
3264                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3265 #else
3266                   DMAE_CMD_ENDIANITY_DW_SWAP |
3267 #endif
3268                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3269                   (vn << DMAE_CMD_E1HVN_SHIFT));
3270
3271         if (bp->port.port_stx) {
3272
3273                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274                 dmae->opcode = opcode;
3275                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3276                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3277                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3278                 dmae->dst_addr_hi = 0;
3279                 dmae->len = sizeof(struct host_port_stats) >> 2;
3280                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3281                 dmae->comp_addr_hi = 0;
3282                 dmae->comp_val = 1;
3283         }
3284
3285         if (bp->func_stx) {
3286
3287                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3288                 dmae->opcode = opcode;
3289                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3290                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3291                 dmae->dst_addr_lo = bp->func_stx >> 2;
3292                 dmae->dst_addr_hi = 0;
3293                 dmae->len = sizeof(struct host_func_stats) >> 2;
3294                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3295                 dmae->comp_addr_hi = 0;
3296                 dmae->comp_val = 1;
3297         }
3298
3299         /* MAC */
3300         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3301                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3302                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3303 #ifdef __BIG_ENDIAN
3304                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3305 #else
3306                   DMAE_CMD_ENDIANITY_DW_SWAP |
3307 #endif
3308                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3309                   (vn << DMAE_CMD_E1HVN_SHIFT));
3310
3311         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3312
3313                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3314                                    NIG_REG_INGRESS_BMAC0_MEM);
3315
3316                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3317                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3318                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3319                 dmae->opcode = opcode;
3320                 dmae->src_addr_lo = (mac_addr +
3321                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3322                 dmae->src_addr_hi = 0;
3323                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3324                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3325                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3326                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3327                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3328                 dmae->comp_addr_hi = 0;
3329                 dmae->comp_val = 1;
3330
3331                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3332                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3333                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3334                 dmae->opcode = opcode;
3335                 dmae->src_addr_lo = (mac_addr +
3336                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3337                 dmae->src_addr_hi = 0;
3338                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3339                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3340                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3341                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3342                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3343                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3344                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3345                 dmae->comp_addr_hi = 0;
3346                 dmae->comp_val = 1;
3347
3348         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3349
3350                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3351
3352                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3353                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3354                 dmae->opcode = opcode;
3355                 dmae->src_addr_lo = (mac_addr +
3356                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3357                 dmae->src_addr_hi = 0;
3358                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3359                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3360                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3361                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362                 dmae->comp_addr_hi = 0;
3363                 dmae->comp_val = 1;
3364
3365                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3366                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367                 dmae->opcode = opcode;
3368                 dmae->src_addr_lo = (mac_addr +
3369                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3370                 dmae->src_addr_hi = 0;
3371                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3372                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3373                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3374                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3375                 dmae->len = 1;
3376                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377                 dmae->comp_addr_hi = 0;
3378                 dmae->comp_val = 1;
3379
3380                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3381                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3382                 dmae->opcode = opcode;
3383                 dmae->src_addr_lo = (mac_addr +
3384                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3385                 dmae->src_addr_hi = 0;
3386                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3387                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3388                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3389                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3390                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3391                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3392                 dmae->comp_addr_hi = 0;
3393                 dmae->comp_val = 1;
3394         }
3395
3396         /* NIG */
3397         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3398         dmae->opcode = opcode;
3399         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3400                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3401         dmae->src_addr_hi = 0;
3402         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3403         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3404         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3405         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3406         dmae->comp_addr_hi = 0;
3407         dmae->comp_val = 1;
3408
3409         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3410         dmae->opcode = opcode;
3411         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3412                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3413         dmae->src_addr_hi = 0;
3414         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3415                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3416         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3417                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3418         dmae->len = (2*sizeof(u32)) >> 2;
3419         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3420         dmae->comp_addr_hi = 0;
3421         dmae->comp_val = 1;
3422
3423         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3424         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3425                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3426                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3427 #ifdef __BIG_ENDIAN
3428                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3429 #else
3430                         DMAE_CMD_ENDIANITY_DW_SWAP |
3431 #endif
3432                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3433                         (vn << DMAE_CMD_E1HVN_SHIFT));
3434         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3435                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3436         dmae->src_addr_hi = 0;
3437         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3438                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3439         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3440                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3441         dmae->len = (2*sizeof(u32)) >> 2;
3442         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3443         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3444         dmae->comp_val = DMAE_COMP_VAL;
3445
3446         *stats_comp = 0;
3447 }
3448
3449 static void bnx2x_func_stats_init(struct bnx2x *bp)
3450 {
3451         struct dmae_command *dmae = &bp->stats_dmae;
3452         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3453
3454         /* sanity */
3455         if (!bp->func_stx) {
3456                 BNX2X_ERR("BUG!\n");
3457                 return;
3458         }
3459
3460         bp->executer_idx = 0;
3461         memset(dmae, 0, sizeof(struct dmae_command));
3462
3463         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3464                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3465                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3466 #ifdef __BIG_ENDIAN
3467                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3468 #else
3469                         DMAE_CMD_ENDIANITY_DW_SWAP |
3470 #endif
3471                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3472                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3473         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3474         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3475         dmae->dst_addr_lo = bp->func_stx >> 2;
3476         dmae->dst_addr_hi = 0;
3477         dmae->len = sizeof(struct host_func_stats) >> 2;
3478         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3479         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3480         dmae->comp_val = DMAE_COMP_VAL;
3481
3482         *stats_comp = 0;
3483 }
3484
3485 static void bnx2x_stats_start(struct bnx2x *bp)
3486 {
3487         if (bp->port.pmf)
3488                 bnx2x_port_stats_init(bp);
3489
3490         else if (bp->func_stx)
3491                 bnx2x_func_stats_init(bp);
3492
3493         bnx2x_hw_stats_post(bp);
3494         bnx2x_storm_stats_post(bp);
3495 }
3496
3497 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3498 {
3499         bnx2x_stats_comp(bp);
3500         bnx2x_stats_pmf_update(bp);
3501         bnx2x_stats_start(bp);
3502 }
3503
3504 static void bnx2x_stats_restart(struct bnx2x *bp)
3505 {
3506         bnx2x_stats_comp(bp);
3507         bnx2x_stats_start(bp);
3508 }
3509
3510 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3511 {
3512         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3513         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3514         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3515         struct regpair diff;
3516
3517         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3518         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3519         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3520         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3521         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3522         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3523         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3524         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3525         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3526         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3527         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3528         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3529         UPDATE_STAT64(tx_stat_gt127,
3530                                 tx_stat_etherstatspkts65octetsto127octets);
3531         UPDATE_STAT64(tx_stat_gt255,
3532                                 tx_stat_etherstatspkts128octetsto255octets);
3533         UPDATE_STAT64(tx_stat_gt511,
3534                                 tx_stat_etherstatspkts256octetsto511octets);
3535         UPDATE_STAT64(tx_stat_gt1023,
3536                                 tx_stat_etherstatspkts512octetsto1023octets);
3537         UPDATE_STAT64(tx_stat_gt1518,
3538                                 tx_stat_etherstatspkts1024octetsto1522octets);
3539         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3540         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3541         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3542         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3543         UPDATE_STAT64(tx_stat_gterr,
3544                                 tx_stat_dot3statsinternalmactransmiterrors);
3545         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
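        /* Note: rx_stat_grxpf and tx_stat_gtxpf each feed two SW
         * counters above - the same HW pause counter backs both the
         * generic xon/xoff statistics and the BMAC-specific ones. */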
3546
3547         estats->pause_frames_received_hi =
3548                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3549         estats->pause_frames_received_lo =
3550                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3551
3552         estats->pause_frames_sent_hi =
3553                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3554         estats->pause_frames_sent_lo =
3555                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3556 }
3557
3558 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3559 {
3560         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3561         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3562         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3563
3564         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3565         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3566         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3567         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3568         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3569         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3570         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3571         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3572         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3573         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3574         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3575         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3576         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3577         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3578         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3579         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3580         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3581         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3582         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3583         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3584         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3585         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3586         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3587         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3588         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3589         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3590         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3591         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3592         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3593         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3594         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3595
3596         estats->pause_frames_received_hi =
3597                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3598         estats->pause_frames_received_lo =
3599                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3600         ADD_64(estats->pause_frames_received_hi,
3601                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3602                estats->pause_frames_received_lo,
3603                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3604
3605         estats->pause_frames_sent_hi =
3606                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3607         estats->pause_frames_sent_lo =
3608                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3609         ADD_64(estats->pause_frames_sent_hi,
3610                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3611                estats->pause_frames_sent_lo,
3612                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3613 }
3614
3615 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3616 {
3617         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3618         struct nig_stats *old = &(bp->port.old_nig_stats);
3619         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3620         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3621         struct regpair diff;
3622         u32 nig_timer_max;
3623
3624         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3625                 bnx2x_bmac_stats_update(bp);
3626
3627         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3628                 bnx2x_emac_stats_update(bp);
3629
3630         else { /* should never happen */
3631                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3632                 return -1;
3633         }
3634
3635         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3636                       new->brb_discard - old->brb_discard);
3637         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3638                       new->brb_truncate - old->brb_truncate);
3639
3640         UPDATE_STAT64_NIG(egress_mac_pkt0,
3641                                         etherstatspkts1024octetsto1522octets);
3642         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3643
3644         memcpy(old, new, sizeof(struct nig_stats));
3645
3646         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3647                sizeof(struct mac_stx));
3648         estats->brb_drop_hi = pstats->brb_drop_hi;
3649         estats->brb_drop_lo = pstats->brb_drop_lo;
3650
3651         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3652
3653         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3654         if (nig_timer_max != estats->nig_timer_max) {
3655                 estats->nig_timer_max = nig_timer_max;
3656                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3657         }
3658
3659         return 0;
3660 }
3661
3662 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3663 {
3664         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3665         struct tstorm_per_port_stats *tport =
3666                                         &stats->tstorm_common.port_statistics;
3667         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3668         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3669         int i;
3670
3671         memset(&(fstats->total_bytes_received_hi), 0,
3672                sizeof(struct host_func_stats) - 2*sizeof(u32));
3673         estats->error_bytes_received_hi = 0;
3674         estats->error_bytes_received_lo = 0;
3675         estats->etherstatsoverrsizepkts_hi = 0;
3676         estats->etherstatsoverrsizepkts_lo = 0;
3677         estats->no_buff_discard_hi = 0;
3678         estats->no_buff_discard_lo = 0;
3679
3680         for_each_queue(bp, i) {
3681                 struct bnx2x_fastpath *fp = &bp->fp[i];
3682                 int cl_id = fp->cl_id;
3683                 struct tstorm_per_client_stats *tclient =
3684                                 &stats->tstorm_common.client_statistics[cl_id];
3685                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3686                 struct ustorm_per_client_stats *uclient =
3687                                 &stats->ustorm_common.client_statistics[cl_id];
3688                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3689                 struct xstorm_per_client_stats *xclient =
3690                                 &stats->xstorm_common.client_statistics[cl_id];
3691                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3692                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3693                 u32 diff;
3694
3695                 /* are storm stats valid? */
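                /* each storm echoes the drv_counter of the last query
                 * it processed; if that value plus one does not match
                 * bp->stats_counter, the latest ramrod has not
                 * completed and this sample is stale */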
3696                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3697                                                         bp->stats_counter) {
3698                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3699                            "  xstorm counter (%d) != stats_counter (%d)\n",
3700                            i, xclient->stats_counter, bp->stats_counter);
3701                         return -1;
3702                 }
3703                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3704                                                         bp->stats_counter) {
3705                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3706                            "  tstorm counter (%d) != stats_counter (%d)\n",
3707                            i, tclient->stats_counter, bp->stats_counter);
3708                         return -2;
3709                 }
3710                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3711                                                         bp->stats_counter) {
3712                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3713                            "  ustorm counter (%d) != stats_counter (%d)\n",
3714                            i, uclient->stats_counter, bp->stats_counter);
3715                         return -4;
3716                 }
3717
3718                 qstats->total_bytes_received_hi =
3719                 qstats->valid_bytes_received_hi =
3720                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3721                 qstats->total_bytes_received_lo =
3722                 qstats->valid_bytes_received_lo =
3723                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3724
3725                 qstats->error_bytes_received_hi =
3726                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3727                 qstats->error_bytes_received_lo =
3728                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3729
3730                 ADD_64(qstats->total_bytes_received_hi,
3731                        qstats->error_bytes_received_hi,
3732                        qstats->total_bytes_received_lo,
3733                        qstats->error_bytes_received_lo);
3734
3735                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3736                                         total_unicast_packets_received);
3737                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3738                                         total_multicast_packets_received);
3739                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3740                                         total_broadcast_packets_received);
3741                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3742                                         etherstatsoverrsizepkts);
3743                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3744
3745                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3746                                         total_unicast_packets_received);
3747                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3748                                         total_multicast_packets_received);
3749                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3750                                         total_broadcast_packets_received);
3751                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3752                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3753                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3754
3755                 qstats->total_bytes_transmitted_hi =
3756                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3757                 qstats->total_bytes_transmitted_lo =
3758                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3759
3760                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3761                                         total_unicast_packets_transmitted);
3762                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3763                                         total_multicast_packets_transmitted);
3764                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3765                                         total_broadcast_packets_transmitted);
3766
3767                 old_tclient->checksum_discard = tclient->checksum_discard;
3768                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3769
3770                 ADD_64(fstats->total_bytes_received_hi,
3771                        qstats->total_bytes_received_hi,
3772                        fstats->total_bytes_received_lo,
3773                        qstats->total_bytes_received_lo);
3774                 ADD_64(fstats->total_bytes_transmitted_hi,
3775                        qstats->total_bytes_transmitted_hi,
3776                        fstats->total_bytes_transmitted_lo,
3777                        qstats->total_bytes_transmitted_lo);
3778                 ADD_64(fstats->total_unicast_packets_received_hi,
3779                        qstats->total_unicast_packets_received_hi,
3780                        fstats->total_unicast_packets_received_lo,
3781                        qstats->total_unicast_packets_received_lo);
3782                 ADD_64(fstats->total_multicast_packets_received_hi,
3783                        qstats->total_multicast_packets_received_hi,
3784                        fstats->total_multicast_packets_received_lo,
3785                        qstats->total_multicast_packets_received_lo);
3786                 ADD_64(fstats->total_broadcast_packets_received_hi,
3787                        qstats->total_broadcast_packets_received_hi,
3788                        fstats->total_broadcast_packets_received_lo,
3789                        qstats->total_broadcast_packets_received_lo);
3790                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3791                        qstats->total_unicast_packets_transmitted_hi,
3792                        fstats->total_unicast_packets_transmitted_lo,
3793                        qstats->total_unicast_packets_transmitted_lo);
3794                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3795                        qstats->total_multicast_packets_transmitted_hi,
3796                        fstats->total_multicast_packets_transmitted_lo,
3797                        qstats->total_multicast_packets_transmitted_lo);
3798                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3799                        qstats->total_broadcast_packets_transmitted_hi,
3800                        fstats->total_broadcast_packets_transmitted_lo,
3801                        qstats->total_broadcast_packets_transmitted_lo);
3802                 ADD_64(fstats->valid_bytes_received_hi,
3803                        qstats->valid_bytes_received_hi,
3804                        fstats->valid_bytes_received_lo,
3805                        qstats->valid_bytes_received_lo);
3806
3807                 ADD_64(estats->error_bytes_received_hi,
3808                        qstats->error_bytes_received_hi,
3809                        estats->error_bytes_received_lo,
3810                        qstats->error_bytes_received_lo);
3811                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3812                        qstats->etherstatsoverrsizepkts_hi,
3813                        estats->etherstatsoverrsizepkts_lo,
3814                        qstats->etherstatsoverrsizepkts_lo);
3815                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3816                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3817         }
3818
3819         ADD_64(fstats->total_bytes_received_hi,
3820                estats->rx_stat_ifhcinbadoctets_hi,
3821                fstats->total_bytes_received_lo,
3822                estats->rx_stat_ifhcinbadoctets_lo);
3823
3824         memcpy(estats, &(fstats->total_bytes_received_hi),
3825                sizeof(struct host_func_stats) - 2*sizeof(u32));
3826
3827         ADD_64(estats->etherstatsoverrsizepkts_hi,
3828                estats->rx_stat_dot3statsframestoolong_hi,
3829                estats->etherstatsoverrsizepkts_lo,
3830                estats->rx_stat_dot3statsframestoolong_lo);
3831         ADD_64(estats->error_bytes_received_hi,
3832                estats->rx_stat_ifhcinbadoctets_hi,
3833                estats->error_bytes_received_lo,
3834                estats->rx_stat_ifhcinbadoctets_lo);
3835
3836         if (bp->port.pmf) {
3837                 estats->mac_filter_discard =
3838                                 le32_to_cpu(tport->mac_filter_discard);
3839                 estats->xxoverflow_discard =
3840                                 le32_to_cpu(tport->xxoverflow_discard);
3841                 estats->brb_truncate_discard =
3842                                 le32_to_cpu(tport->brb_truncate_discard);
3843                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3844         }
3845
3846         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3847
3848         bp->stats_pending = 0;
3849
3850         return 0;
3851 }
3852
3853 static void bnx2x_net_stats_update(struct bnx2x *bp)
3854 {
3855         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3856         struct net_device_stats *nstats = &bp->dev->stats;
3857         int i;
3858
3859         nstats->rx_packets =
3860                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3861                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3862                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3863
3864         nstats->tx_packets =
3865                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3866                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3867                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3868
3869         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3870
3871         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3872
3873         nstats->rx_dropped = estats->mac_discard;
3874         for_each_queue(bp, i)
3875                 nstats->rx_dropped +=
3876                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3877
3878         nstats->tx_dropped = 0;
3879
3880         nstats->multicast =
3881                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3882
3883         nstats->collisions =
3884                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3885
3886         nstats->rx_length_errors =
3887                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3888                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3889         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3890                                  bnx2x_hilo(&estats->brb_truncate_hi);
3891         nstats->rx_crc_errors =
3892                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3893         nstats->rx_frame_errors =
3894                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3895         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3896         nstats->rx_missed_errors = estats->xxoverflow_discard;
3897
3898         nstats->rx_errors = nstats->rx_length_errors +
3899                             nstats->rx_over_errors +
3900                             nstats->rx_crc_errors +
3901                             nstats->rx_frame_errors +
3902                             nstats->rx_fifo_errors +
3903                             nstats->rx_missed_errors;
3904
3905         nstats->tx_aborted_errors =
3906                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3907                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3908         nstats->tx_carrier_errors =
3909                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3910         nstats->tx_fifo_errors = 0;
3911         nstats->tx_heartbeat_errors = 0;
3912         nstats->tx_window_errors = 0;
3913
3914         nstats->tx_errors = nstats->tx_aborted_errors +
3915                             nstats->tx_carrier_errors +
3916             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3917 }
3918
3919 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3920 {
3921         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3922         int i;
3923
3924         estats->driver_xoff = 0;
3925         estats->rx_err_discard_pkt = 0;
3926         estats->rx_skb_alloc_failed = 0;
3927         estats->hw_csum_err = 0;
3928         for_each_queue(bp, i) {
3929                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3930
3931                 estats->driver_xoff += qstats->driver_xoff;
3932                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3933                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3934                 estats->hw_csum_err += qstats->hw_csum_err;
3935         }
3936 }
3937
3938 static void bnx2x_stats_update(struct bnx2x *bp)
3939 {
3940         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3941
3942         if (*stats_comp != DMAE_COMP_VAL)
3943                 return;
3944
3945         if (bp->port.pmf)
3946                 bnx2x_hw_stats_update(bp);
3947
3948         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3949                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3950                 bnx2x_panic();
3951                 return;
3952         }
3953
3954         bnx2x_net_stats_update(bp);
3955         bnx2x_drv_stats_update(bp);
3956
3957         if (bp->msglevel & NETIF_MSG_TIMER) {
3958                 struct tstorm_per_client_stats *old_tclient =
3959                                                         &bp->fp->old_tclient;
3960                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3961                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3962                 struct net_device_stats *nstats = &bp->dev->stats;
3963                 int i;
3964
3965                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3966                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3967                                   "  tx pkt (%lx)\n",
3968                        bnx2x_tx_avail(bp->fp),
3969                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3970                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3971                                   "  rx pkt (%lx)\n",
3972                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3973                              bp->fp->rx_comp_cons),
3974                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3975                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
3976                                   "brb truncate %u\n",
3977                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3978                        qstats->driver_xoff,
3979                        estats->brb_drop_lo, estats->brb_truncate_lo);
3980                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3981                         "packets_too_big_discard %lu  no_buff_discard %lu  "
3982                         "mac_discard %u  mac_filter_discard %u  "
3983                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3984                         "ttl0_discard %u\n",
3985                        old_tclient->checksum_discard,
3986                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3987                        bnx2x_hilo(&qstats->no_buff_discard_hi),
3988                        estats->mac_discard, estats->mac_filter_discard,
3989                        estats->xxoverflow_discard, estats->brb_truncate_discard,
3990                        old_tclient->ttl0_discard);
3991
3992                 for_each_queue(bp, i) {
3993                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3994                                bnx2x_fp(bp, i, tx_pkt),
3995                                bnx2x_fp(bp, i, rx_pkt),
3996                                bnx2x_fp(bp, i, rx_calls));
3997                 }
3998         }
3999
4000         bnx2x_hw_stats_post(bp);
4001         bnx2x_storm_stats_post(bp);
4002 }
4003
4004 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4005 {
4006         struct dmae_command *dmae;
4007         u32 opcode;
4008         int loader_idx = PMF_DMAE_C(bp);
4009         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4010
4011         bp->executer_idx = 0;
4012
4013         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4014                   DMAE_CMD_C_ENABLE |
4015                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4016 #ifdef __BIG_ENDIAN
4017                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4018 #else
4019                   DMAE_CMD_ENDIANITY_DW_SWAP |
4020 #endif
4021                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4022                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4023
4024         if (bp->port.port_stx) {
4025
4026                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4027                 if (bp->func_stx)
4028                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4029                 else
4030                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4031                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4032                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4033                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4034                 dmae->dst_addr_hi = 0;
4035                 dmae->len = sizeof(struct host_port_stats) >> 2;
4036                 if (bp->func_stx) {
4037                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4038                         dmae->comp_addr_hi = 0;
4039                         dmae->comp_val = 1;
4040                 } else {
4041                         dmae->comp_addr_lo =
4042                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4043                         dmae->comp_addr_hi =
4044                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4045                         dmae->comp_val = DMAE_COMP_VAL;
4046
4047                         *stats_comp = 0;
4048                 }
4049         }
4050
4051         if (bp->func_stx) {
4052
4053                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4054                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4055                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4056                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4057                 dmae->dst_addr_lo = bp->func_stx >> 2;
4058                 dmae->dst_addr_hi = 0;
4059                 dmae->len = sizeof(struct host_func_stats) >> 2;
4060                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4061                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4062                 dmae->comp_val = DMAE_COMP_VAL;
4063
4064                 *stats_comp = 0;
4065         }
4066 }
4067
4068 static void bnx2x_stats_stop(struct bnx2x *bp)
4069 {
4070         int update = 0;
4071
4072         bnx2x_stats_comp(bp);
4073
4074         if (bp->port.pmf)
4075                 update = (bnx2x_hw_stats_update(bp) == 0);
4076
4077         update |= (bnx2x_storm_stats_update(bp) == 0);
4078
4079         if (update) {
4080                 bnx2x_net_stats_update(bp);
4081
4082                 if (bp->port.pmf)
4083                         bnx2x_port_stats_stop(bp);
4084
4085                 bnx2x_hw_stats_post(bp);
4086                 bnx2x_stats_comp(bp);
4087         }
4088 }
4089
4090 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4091 {
4092 }
4093
4094 static const struct {
4095         void (*action)(struct bnx2x *bp);
4096         enum bnx2x_stats_state next_state;
4097 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4098 /* state        event   */
4099 {
4100 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4101 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4102 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4103 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4104 },
4105 {
4106 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4107 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4108 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4109 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4110 }
4111 };
4112
4113 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4114 {
4115         enum bnx2x_stats_state state = bp->stats_state;
4116
4117         bnx2x_stats_stm[state][event].action(bp);
4118         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4119
4120         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4121                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4122                    state, event, bp->stats_state);
4123 }
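
/* Editor's sketch: bnx2x_stats_stm[][] above is a table-driven state
 * machine - each (state, event) cell carries an action callback plus the
 * next state, so bnx2x_stats_handle() needs no switch statement.  A minimal
 * standalone analogue (all toy_* names are hypothetical; nothing below
 * exists in the driver):
 */
#if 0   /* illustrative example only, not compiled */
#include <stdio.h>

enum toy_state { TOY_OFF, TOY_ON, TOY_STATE_MAX };
enum toy_event { TOY_EV_POWER, TOY_EV_TICK, TOY_EV_MAX };

static void toy_noop(void)  { }
static void toy_start(void) { printf("started\n"); }
static void toy_stop(void)  { printf("stopped\n"); }

static const struct {
        void (*action)(void);
        enum toy_state next_state;
} toy_stm[TOY_STATE_MAX][TOY_EV_MAX] = {
/* state     event: POWER               TICK                */
/* OFF */ { {toy_start, TOY_ON },  {toy_noop, TOY_OFF} },
/* ON  */ { {toy_stop,  TOY_OFF},  {toy_noop, TOY_ON } },
};

static enum toy_state toy_handle(enum toy_state s, enum toy_event e)
{
        toy_stm[s][e].action();         /* dispatch exactly like the driver */
        return toy_stm[s][e].next_state;
}
#endif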
4124
4125 static void bnx2x_timer(unsigned long data)
4126 {
4127         struct bnx2x *bp = (struct bnx2x *) data;
4128
4129         if (!netif_running(bp->dev))
4130                 return;
4131
4132         if (atomic_read(&bp->intr_sem) != 0)
4133                 goto timer_restart;
4134
4135         if (poll) {
4136                 struct bnx2x_fastpath *fp = &bp->fp[0];
4137                 int rc;
4138
4139                 bnx2x_tx_int(fp, 1000);
4140                 rc = bnx2x_rx_int(fp, 1000);
4141         }
4142
4143         if (!BP_NOMCP(bp)) {
4144                 int func = BP_FUNC(bp);
4145                 u32 drv_pulse;
4146                 u32 mcp_pulse;
4147
4148                 ++bp->fw_drv_pulse_wr_seq;
4149                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4150                 /* TBD - add SYSTEM_TIME */
4151                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4152                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4153
4154                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4155                              MCP_PULSE_SEQ_MASK);
4156                 /* The delta between driver pulse and mcp response
4157                  * should be 1 (before mcp response) or 0 (after mcp response)
4158                  */
4159                 if ((drv_pulse != mcp_pulse) &&
4160                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4161                         /* someone lost a heartbeat... */
4162                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4163                                   drv_pulse, mcp_pulse);
4164                 }
4165         }
4166
4167         if ((bp->state == BNX2X_STATE_OPEN) ||
4168             (bp->state == BNX2X_STATE_DISABLED))
4169                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4170
4171 timer_restart:
4172         mod_timer(&bp->timer, jiffies + bp->current_interval);
4173 }
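
/* Editor's note - a worked instance of the pulse check above: drv_pulse is
 * an incrementing sequence number the driver writes to shared memory, and
 * mcp_pulse is the MCP's masked echo.  The exchange is healthy when
 * drv_pulse == mcp_pulse (MCP already answered) or
 * drv_pulse == mcp_pulse + 1 (answer still in flight).  E.g. with
 * drv_pulse = 0x0043, an mcp_pulse of 0x0043 or 0x0042 passes, while
 * 0x0040 fails both comparisons and triggers the BNX2X_ERR() above.
 */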
4174
4175 /* end of Statistics */
4176
4177 /* nic init */
4178
4179 /*
4180  * nic init service functions
4181  */
4182
4183 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4184 {
4185         int port = BP_PORT(bp);
4186
4187         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4188                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4189                         sizeof(struct ustorm_status_block)/4);
4190         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4191                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4192                         sizeof(struct cstorm_status_block)/4);
4193 }
4194
4195 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4196                           dma_addr_t mapping, int sb_id)
4197 {
4198         int port = BP_PORT(bp);
4199         int func = BP_FUNC(bp);
4200         int index;
4201         u64 section;
4202
4203         /* USTORM */
4204         section = ((u64)mapping) + offsetof(struct host_status_block,
4205                                             u_status_block);
4206         sb->u_status_block.status_block_id = sb_id;
4207
4208         REG_WR(bp, BAR_USTRORM_INTMEM +
4209                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4210         REG_WR(bp, BAR_USTRORM_INTMEM +
4211                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4212                U64_HI(section));
4213         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4214                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4215
4216         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4217                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4218                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4219
4220         /* CSTORM */
4221         section = ((u64)mapping) + offsetof(struct host_status_block,
4222                                             c_status_block);
4223         sb->c_status_block.status_block_id = sb_id;
4224
4225         REG_WR(bp, BAR_CSTRORM_INTMEM +
4226                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4227         REG_WR(bp, BAR_CSTRORM_INTMEM +
4228                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4229                U64_HI(section));
4230         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4231                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4232
4233         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4234                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4235                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4236
4237         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4238 }
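
/* Editor's sketch: the status block setup above repeatedly programs one
 * 64-bit DMA address into a pair of 32-bit registers via U64_LO()/U64_HI().
 * The split is plain shift-and-mask arithmetic; a standalone equivalent
 * (the EX_*/ex_* names are hypothetical, for illustration only):
 */
#if 0   /* illustrative example only, not compiled */
#include <stdint.h>

#define EX_U64_LO(x)    ((uint32_t)((uint64_t)(x) & 0xffffffffULL))
#define EX_U64_HI(x)    ((uint32_t)((uint64_t)(x) >> 32))

static void ex_write_addr_pair(volatile uint32_t *lo_reg,
                               volatile uint32_t *hi_reg, uint64_t dma)
{
        *lo_reg = EX_U64_LO(dma);       /* low dword at offset 0 */
        *hi_reg = EX_U64_HI(dma);       /* high dword at offset +4, as above */
}
#endif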
4239
4240 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4241 {
4242         int func = BP_FUNC(bp);
4243
4244         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4245                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4246                         sizeof(struct ustorm_def_status_block)/4);
4247         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4248                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4249                         sizeof(struct cstorm_def_status_block)/4);
4250         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4251                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4252                         sizeof(struct xstorm_def_status_block)/4);
4253         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4254                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4255                         sizeof(struct tstorm_def_status_block)/4);
4256 }
4257
4258 static void bnx2x_init_def_sb(struct bnx2x *bp,
4259                               struct host_def_status_block *def_sb,
4260                               dma_addr_t mapping, int sb_id)
4261 {
4262         int port = BP_PORT(bp);
4263         int func = BP_FUNC(bp);
4264         int index, val, reg_offset;
4265         u64 section;
4266
4267         /* ATTN */
4268         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4269                                             atten_status_block);
4270         def_sb->atten_status_block.status_block_id = sb_id;
4271
4272         bp->attn_state = 0;
4273
4274         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4275                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4276
4277         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4278                 bp->attn_group[index].sig[0] = REG_RD(bp,
4279                                                      reg_offset + 0x10*index);
4280                 bp->attn_group[index].sig[1] = REG_RD(bp,
4281                                                reg_offset + 0x4 + 0x10*index);
4282                 bp->attn_group[index].sig[2] = REG_RD(bp,
4283                                                reg_offset + 0x8 + 0x10*index);
4284                 bp->attn_group[index].sig[3] = REG_RD(bp,
4285                                                reg_offset + 0xc + 0x10*index);
4286         }
4287
4288         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4289                              HC_REG_ATTN_MSG0_ADDR_L);
4290
4291         REG_WR(bp, reg_offset, U64_LO(section));
4292         REG_WR(bp, reg_offset + 4, U64_HI(section));
4293
4294         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4295
4296         val = REG_RD(bp, reg_offset);
4297         val |= sb_id;
4298         REG_WR(bp, reg_offset, val);
4299
4300         /* USTORM */
4301         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4302                                             u_def_status_block);
4303         def_sb->u_def_status_block.status_block_id = sb_id;
4304
4305         REG_WR(bp, BAR_USTRORM_INTMEM +
4306                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4307         REG_WR(bp, BAR_USTRORM_INTMEM +
4308                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4309                U64_HI(section));
4310         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4311                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4312
4313         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4314                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4315                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4316
4317         /* CSTORM */
4318         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4319                                             c_def_status_block);
4320         def_sb->c_def_status_block.status_block_id = sb_id;
4321
4322         REG_WR(bp, BAR_CSTRORM_INTMEM +
4323                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4324         REG_WR(bp, BAR_CSTRORM_INTMEM +
4325                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4326                U64_HI(section));
4327         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4328                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4329
4330         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4331                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4332                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4333
4334         /* TSTORM */
4335         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4336                                             t_def_status_block);
4337         def_sb->t_def_status_block.status_block_id = sb_id;
4338
4339         REG_WR(bp, BAR_TSTRORM_INTMEM +
4340                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4341         REG_WR(bp, BAR_TSTRORM_INTMEM +
4342                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4343                U64_HI(section));
4344         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4345                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4346
4347         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4348                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4349                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4350
4351         /* XSTORM */
4352         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4353                                             x_def_status_block);
4354         def_sb->x_def_status_block.status_block_id = sb_id;
4355
4356         REG_WR(bp, BAR_XSTRORM_INTMEM +
4357                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4358         REG_WR(bp, BAR_XSTRORM_INTMEM +
4359                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4360                U64_HI(section));
4361         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4362                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4363
4364         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4365                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4366                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4367
4368         bp->stats_pending = 0;
4369         bp->set_mac_pending = 0;
4370
4371         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4372 }
4373
4374 static void bnx2x_update_coalesce(struct bnx2x *bp)
4375 {
4376         int port = BP_PORT(bp);
4377         int i;
4378
4379         for_each_queue(bp, i) {
4380                 int sb_id = bp->fp[i].sb_id;
4381
4382                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4383                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4384                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4385                                                     U_SB_ETH_RX_CQ_INDEX),
4386                         bp->rx_ticks/12);
4387                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4388                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4389                                                      U_SB_ETH_RX_CQ_INDEX),
4390                          bp->rx_ticks ? 0 : 1);
4391
4392                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4393                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4394                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4395                                                     C_SB_ETH_TX_CQ_INDEX),
4396                         bp->tx_ticks/12);
4397                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4398                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4399                                                      C_SB_ETH_TX_CQ_INDEX),
4400                          bp->tx_ticks ? 0 : 1);
4401         }
4402 }
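
/* Editor's note on the arithmetic above: the divide by 12 implies that the
 * HC timeout registers count in 12-usec units while rx_ticks/tx_ticks are
 * kept in microseconds (an inference from this code, not a documented fact
 * here).  E.g. rx_ticks = 48 programs a timeout of 4 units, and a tick
 * value of 0 disables coalescing for that index by writing 1 to the
 * corresponding HC_DISABLE offset.
 */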
4403
4404 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4405                                        struct bnx2x_fastpath *fp, int last)
4406 {
4407         int i;
4408
4409         for (i = 0; i < last; i++) {
4410                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4411                 struct sk_buff *skb = rx_buf->skb;
4412
4413                 if (skb == NULL) {
4414                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4415                         continue;
4416                 }
4417
4418                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4419                         pci_unmap_single(bp->pdev,
4420                                          pci_unmap_addr(rx_buf, mapping),
4421                                          bp->rx_buf_size,
4422                                          PCI_DMA_FROMDEVICE);
4423
4424                 dev_kfree_skb(skb);
4425                 rx_buf->skb = NULL;
4426         }
4427 }
4428
4429 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4430 {
4431         int func = BP_FUNC(bp);
4432         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4433                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4434         u16 ring_prod, cqe_ring_prod;
4435         int i, j;
4436
4437         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4438         DP(NETIF_MSG_IFUP,
4439            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4440
4441         if (bp->flags & TPA_ENABLE_FLAG) {
4442
4443                 for_each_rx_queue(bp, j) {
4444                         struct bnx2x_fastpath *fp = &bp->fp[j];
4445
4446                         for (i = 0; i < max_agg_queues; i++) {
4447                                 fp->tpa_pool[i].skb =
4448                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4449                                 if (!fp->tpa_pool[i].skb) {
4450                                         BNX2X_ERR("Failed to allocate TPA "
4451                                                   "skb pool for queue[%d] - "
4452                                                   "disabling TPA on this "
4453                                                   "queue!\n", j);
4454                                         bnx2x_free_tpa_pool(bp, fp, i);
4455                                         fp->disable_tpa = 1;
4456                                         break;
4457                                 }
4458                                 /* clear the DMA mapping for this queue's bin */
4459                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4460                                                    mapping, 0);
4461                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4462                         }
4463                 }
4464         }
4465
4466         for_each_rx_queue(bp, j) {
4467                 struct bnx2x_fastpath *fp = &bp->fp[j];
4468
4469                 fp->rx_bd_cons = 0;
4470                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4471                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4472
4473                 /* "next page" elements initialization */
4474                 /* SGE ring */
4475                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4476                         struct eth_rx_sge *sge;
4477
4478                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4479                         sge->addr_hi =
4480                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4481                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4482                         sge->addr_lo =
4483                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4484                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4485                 }
4486
4487                 bnx2x_init_sge_ring_bit_mask(fp);
4488
4489                 /* RX BD ring */
4490                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4491                         struct eth_rx_bd *rx_bd;
4492
4493                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4494                         rx_bd->addr_hi =
4495                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4496                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4497                         rx_bd->addr_lo =
4498                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4499                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4500                 }
4501
4502                 /* CQ ring */
4503                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4504                         struct eth_rx_cqe_next_page *nextpg;
4505
4506                         nextpg = (struct eth_rx_cqe_next_page *)
4507                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4508                         nextpg->addr_hi =
4509                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4510                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4511                         nextpg->addr_lo =
4512                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4513                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4514                 }
4515
4516                 /* Allocate SGEs and initialize the ring elements */
4517                 for (i = 0, ring_prod = 0;
4518                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4519
4520                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4521                                 BNX2X_ERR("was only able to allocate "
4522                                           "%d rx sges\n", i);
4523                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4524                                 /* Cleanup already allocated elements */
4525                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4526                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4527                                 fp->disable_tpa = 1;
4528                                 ring_prod = 0;
4529                                 break;
4530                         }
4531                         ring_prod = NEXT_SGE_IDX(ring_prod);
4532                 }
4533                 fp->rx_sge_prod = ring_prod;
4534
4535                 /* Allocate BDs and initialize BD ring */
4536                 fp->rx_comp_cons = 0;
4537                 cqe_ring_prod = ring_prod = 0;
4538                 for (i = 0; i < bp->rx_ring_size; i++) {
4539                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4540                                 BNX2X_ERR("was only able to allocate "
4541                                           "%d rx skbs on queue[%d]\n", i, j);
4542                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4543                                 break;
4544                         }
4545                         ring_prod = NEXT_RX_IDX(ring_prod);
4546                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4547                         WARN_ON(ring_prod <= i);
4548                 }
4549
4550                 fp->rx_bd_prod = ring_prod;
4551                 /* must not have more available CQEs than BDs */
4552                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4553                                        cqe_ring_prod);
4554                 fp->rx_pkt = fp->rx_calls = 0;
4555
4556                 /* Warning!
4557                  * This will generate an interrupt (to the TSTORM);
4558                  * it must only be done after the chip is initialized.
4559                  */
4560                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4561                                      fp->rx_sge_prod);
4562                 if (j != 0)
4563                         continue;
4564
4565                 REG_WR(bp, BAR_USTRORM_INTMEM +
4566                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4567                        U64_LO(fp->rx_comp_mapping));
4568                 REG_WR(bp, BAR_USTRORM_INTMEM +
4569                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4570                        U64_HI(fp->rx_comp_mapping));
4571         }
4572 }
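
/* Editor's sketch: the three "next page" loops above chain the per-page
 * rings into one logical circular ring - the tail entry of page i holds the
 * bus address of page (i + 1) % N, so the hardware follows the link and
 * wraps back to page 0 automatically.  A standalone analogue (toy types
 * and sizes, for illustration only):
 */
#if 0   /* illustrative example only, not compiled */
#include <stdint.h>

#define EX_N_PAGES      4
#define EX_PAGE_BYTES   4096

struct ex_next_ptr { uint32_t addr_hi, addr_lo; };

/* tail[i] points at the reserved "next page" slot at the end of page i */
static void ex_chain_pages(struct ex_next_ptr *tail[EX_N_PAGES],
                           uint64_t ring_base)
{
        int i;

        for (i = 1; i <= EX_N_PAGES; i++) {
                uint64_t next = ring_base + EX_PAGE_BYTES * (i % EX_N_PAGES);

                tail[i - 1]->addr_hi = (uint32_t)(next >> 32);
                tail[i - 1]->addr_lo = (uint32_t)next;
        }
}
#endif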
4573
4574 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4575 {
4576         int i, j;
4577
4578         for_each_tx_queue(bp, j) {
4579                 struct bnx2x_fastpath *fp = &bp->fp[j];
4580
4581                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4582                         struct eth_tx_bd *tx_bd =
4583                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4584
4585                         tx_bd->addr_hi =
4586                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4587                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4588                         tx_bd->addr_lo =
4589                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4590                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4591                 }
4592
4593                 fp->tx_pkt_prod = 0;
4594                 fp->tx_pkt_cons = 0;
4595                 fp->tx_bd_prod = 0;
4596                 fp->tx_bd_cons = 0;
4597                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4598                 fp->tx_pkt = 0;
4599         }
4600 }
4601
4602 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4603 {
4604         int func = BP_FUNC(bp);
4605
4606         spin_lock_init(&bp->spq_lock);
4607
4608         bp->spq_left = MAX_SPQ_PENDING;
4609         bp->spq_prod_idx = 0;
4610         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4611         bp->spq_prod_bd = bp->spq;
4612         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4613
4614         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4615                U64_LO(bp->spq_mapping));
4616         REG_WR(bp,
4617                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4618                U64_HI(bp->spq_mapping));
4619
4620         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4621                bp->spq_prod_idx);
4622 }
4623
4624 static void bnx2x_init_context(struct bnx2x *bp)
4625 {
4626         int i;
4627
4628         for_each_queue(bp, i) {
4629                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4630                 struct bnx2x_fastpath *fp = &bp->fp[i];
4631                 u8 cl_id = fp->cl_id;
4632                 u8 sb_id = FP_SB_ID(fp);
4633
4634                 context->ustorm_st_context.common.sb_index_numbers =
4635                                                 BNX2X_RX_SB_INDEX_NUM;
4636                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4637                 context->ustorm_st_context.common.status_block_id = sb_id;
4638                 context->ustorm_st_context.common.flags =
4639                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4640                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4641                 context->ustorm_st_context.common.statistics_counter_id =
4642                                                 cl_id;
4643                 context->ustorm_st_context.common.mc_alignment_log_size =
4644                                                 BNX2X_RX_ALIGN_SHIFT;
4645                 context->ustorm_st_context.common.bd_buff_size =
4646                                                 bp->rx_buf_size;
4647                 context->ustorm_st_context.common.bd_page_base_hi =
4648                                                 U64_HI(fp->rx_desc_mapping);
4649                 context->ustorm_st_context.common.bd_page_base_lo =
4650                                                 U64_LO(fp->rx_desc_mapping);
4651                 if (!fp->disable_tpa) {
4652                         context->ustorm_st_context.common.flags |=
4653                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4654                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4655                         context->ustorm_st_context.common.sge_buff_size =
4656                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4657                                          (u32)0xffff);
4658                         context->ustorm_st_context.common.sge_page_base_hi =
4659                                                 U64_HI(fp->rx_sge_mapping);
4660                         context->ustorm_st_context.common.sge_page_base_lo =
4661                                                 U64_LO(fp->rx_sge_mapping);
4662                 }
4663
4664                 context->ustorm_ag_context.cdu_usage =
4665                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4666                                                CDU_REGION_NUMBER_UCM_AG,
4667                                                ETH_CONNECTION_TYPE);
4668
4669                 context->xstorm_st_context.tx_bd_page_base_hi =
4670                                                 U64_HI(fp->tx_desc_mapping);
4671                 context->xstorm_st_context.tx_bd_page_base_lo =
4672                                                 U64_LO(fp->tx_desc_mapping);
4673                 context->xstorm_st_context.db_data_addr_hi =
4674                                                 U64_HI(fp->tx_prods_mapping);
4675                 context->xstorm_st_context.db_data_addr_lo =
4676                                                 U64_LO(fp->tx_prods_mapping);
4677                 context->xstorm_st_context.statistics_data = (fp->cl_id |
4678                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4679                 context->cstorm_st_context.sb_index_number =
4680                                                 C_SB_ETH_TX_CQ_INDEX;
4681                 context->cstorm_st_context.status_block_id = sb_id;
4682
4683                 context->xstorm_ag_context.cdu_reserved =
4684                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4685                                                CDU_REGION_NUMBER_XCM_AG,
4686                                                ETH_CONNECTION_TYPE);
4687         }
4688 }
4689
4690 static void bnx2x_init_ind_table(struct bnx2x *bp)
4691 {
4692         int func = BP_FUNC(bp);
4693         int i;
4694
4695         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4696                 return;
4697
4698         DP(NETIF_MSG_IFUP,
4699            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4700         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4701                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4702                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4703                         BP_CL_ID(bp) + (i % bp->num_rx_queues));
4704 }
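
/* Editor's note - worked example for the table fill above: with
 * num_rx_queues = 4 and BP_CL_ID(bp) = 8, the TSTORM_INDIRECTION_TABLE_SIZE
 * entries read 8, 9, 10, 11, 8, 9, ... - i.e. RSS hash buckets are spread
 * round-robin across the function's client IDs.  (The concrete values are
 * illustrative; only the i % num_rx_queues pattern comes from the code.)
 */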
4705
4706 static void bnx2x_set_client_config(struct bnx2x *bp)
4707 {
4708         struct tstorm_eth_client_config tstorm_client = {0};
4709         int port = BP_PORT(bp);
4710         int i;
4711
4712         tstorm_client.mtu = bp->dev->mtu;
4713         tstorm_client.config_flags =
4714                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4715                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4716 #ifdef BCM_VLAN
4717         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4718                 tstorm_client.config_flags |=
4719                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4720                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4721         }
4722 #endif
4723
4724         if (bp->flags & TPA_ENABLE_FLAG) {
4725                 tstorm_client.max_sges_for_packet =
4726                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4727                 tstorm_client.max_sges_for_packet =
4728                         ((tstorm_client.max_sges_for_packet +
4729                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4730                         PAGES_PER_SGE_SHIFT;
4731
4732                 tstorm_client.config_flags |=
4733                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4734         }
4735
4736         for_each_queue(bp, i) {
4737                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4738
4739                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4740                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4741                        ((u32 *)&tstorm_client)[0]);
4742                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4743                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4744                        ((u32 *)&tstorm_client)[1]);
4745         }
4746
4747         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4748            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4749 }
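
/* Editor's note - worked example for the max_sges_for_packet arithmetic
 * above, assuming for illustration SGE_PAGE_SIZE = 4096 and
 * PAGES_PER_SGE = 2 (both are build-time constants defined elsewhere): an
 * MTU of 9000 page-aligns to 12288, i.e. 3 pages; rounding up to a
 * multiple of PAGES_PER_SGE gives 4 pages, and the final shift converts
 * that to 4 >> 1 = 2 SGEs per packet.
 */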
4750
4751 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4752 {
4753         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4754         int mode = bp->rx_mode;
4755         int mask = (1 << BP_L_ID(bp));
4756         int func = BP_FUNC(bp);
4757         int i;
4758
4759         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4760
4761         switch (mode) {
4762         case BNX2X_RX_MODE_NONE: /* no Rx */
4763                 tstorm_mac_filter.ucast_drop_all = mask;
4764                 tstorm_mac_filter.mcast_drop_all = mask;
4765                 tstorm_mac_filter.bcast_drop_all = mask;
4766                 break;
4767         case BNX2X_RX_MODE_NORMAL:
4768                 tstorm_mac_filter.bcast_accept_all = mask;
4769                 break;
4770         case BNX2X_RX_MODE_ALLMULTI:
4771                 tstorm_mac_filter.mcast_accept_all = mask;
4772                 tstorm_mac_filter.bcast_accept_all = mask;
4773                 break;
4774         case BNX2X_RX_MODE_PROMISC:
4775                 tstorm_mac_filter.ucast_accept_all = mask;
4776                 tstorm_mac_filter.mcast_accept_all = mask;
4777                 tstorm_mac_filter.bcast_accept_all = mask;
4778                 break;
4779         default:
4780                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4781                 break;
4782         }
4783
4784         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4785                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4786                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4787                        ((u32 *)&tstorm_mac_filter)[i]);
4788
4789 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4790                    ((u32 *)&tstorm_mac_filter)[i]); */
4791         }
4792
4793         if (mode != BNX2X_RX_MODE_NONE)
4794                 bnx2x_set_client_config(bp);
4795 }
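
/* Editor's note - how the mask above lands in the filter config: mask is a
 * one-hot client bit, e.g. BP_L_ID(bp) = 2 gives mask = 0x04 (the concrete
 * value is illustrative).  Each accept/drop field in
 * tstorm_eth_mac_filter_config is a bitmap over client IDs, so e.g.
 * BNX2X_RX_MODE_PROMISC sets that bit in the ucast/mcast/bcast accept_all
 * bitmaps before the whole struct is copied word-by-word into TSTORM memory.
 */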
4796
4797 static void bnx2x_init_internal_common(struct bnx2x *bp)
4798 {
4799         int i;
4800
4801         if (bp->flags & TPA_ENABLE_FLAG) {
4802                 struct tstorm_eth_tpa_exist tpa = {0};
4803
4804                 tpa.tpa_exist = 1;
4805
4806                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4807                        ((u32 *)&tpa)[0]);
4808                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4809                        ((u32 *)&tpa)[1]);
4810         }
4811
4812         /* Zero this manually as its initialization is
4813            currently missing in the initTool */
4814         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4815                 REG_WR(bp, BAR_USTRORM_INTMEM +
4816                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4817 }
4818
4819 static void bnx2x_init_internal_port(struct bnx2x *bp)
4820 {
4821         int port = BP_PORT(bp);
4822
4823         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4824         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4825         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4826         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4827 }
4828
4829 /* Calculates the sum of vn_min_rates.
4830    It is needed for further normalization of the min_rates.
4831    Returns:
4832      sum of vn_min_rates
4833        or
4834      0 - if all the min_rates are 0.
4835      In the latter case the fairness algorithm should be deactivated.
4836      If not all min_rates are zero, those that are zero will be set to 1.
4837  */
4838 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4839 {
4840         int all_zero = 1;
4841         int port = BP_PORT(bp);
4842         int vn;
4843
4844         bp->vn_weight_sum = 0;
4845         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4846                 int func = 2*vn + port;
4847                 u32 vn_cfg =
4848                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4849                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4850                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4851
4852                 /* Skip hidden vns */
4853                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4854                         continue;
4855
4856                 /* If min rate is zero - set it to 1 */
4857                 if (!vn_min_rate)
4858                         vn_min_rate = DEF_MIN_RATE;
4859                 else
4860                         all_zero = 0;
4861
4862                 bp->vn_weight_sum += vn_min_rate;
4863         }
4864
4865         /* ... only if all min rates are zeros - disable fairness */
4866         if (all_zero)
4867                 bp->vn_weight_sum = 0;
4868 }
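
/* Editor's note - worked example for the weight sum above, assuming for
 * illustration DEF_MIN_RATE = 100: configured minima of {0, 300, 0, 500}
 * across the four vns become {100, 300, 100, 500}, so vn_weight_sum = 1000
 * and fairness stays enabled.  Only when every (non-hidden) vn reports 0
 * does all_zero hold and vn_weight_sum collapse back to 0, which the
 * caller treats as "fairness disabled".
 */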
4869
4870 static void bnx2x_init_internal_func(struct bnx2x *bp)
4871 {
4872         struct tstorm_eth_function_common_config tstorm_config = {0};
4873         struct stats_indication_flags stats_flags = {0};
4874         int port = BP_PORT(bp);
4875         int func = BP_FUNC(bp);
4876         int i, j;
4877         u32 offset;
4878         u16 max_agg_size;
4879
4880         if (is_multi(bp)) {
4881                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4882                 tstorm_config.rss_result_mask = MULTI_MASK;
4883         }
4884         if (IS_E1HMF(bp))
4885                 tstorm_config.config_flags |=
4886                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4887
4888         tstorm_config.leading_client_id = BP_L_ID(bp);
4889
4890         REG_WR(bp, BAR_TSTRORM_INTMEM +
4891                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4892                (*(u32 *)&tstorm_config));
4893
4894         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4895         bnx2x_set_storm_rx_mode(bp);
4896
4897         for_each_queue(bp, i) {
4898                 u8 cl_id = bp->fp[i].cl_id;
4899
4900                 /* reset xstorm per client statistics */
4901                 offset = BAR_XSTRORM_INTMEM +
4902                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4903                 for (j = 0;
4904                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4905                         REG_WR(bp, offset + j*4, 0);
4906
4907                 /* reset tstorm per client statistics */
4908                 offset = BAR_TSTRORM_INTMEM +
4909                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4910                 for (j = 0;
4911                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4912                         REG_WR(bp, offset + j*4, 0);
4913
4914                 /* reset ustorm per client statistics */
4915                 offset = BAR_USTRORM_INTMEM +
4916                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4917                 for (j = 0;
4918                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4919                         REG_WR(bp, offset + j*4, 0);
4920         }
4921
4922         /* Init statistics related context */
4923         stats_flags.collect_eth = 1;
4924
4925         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4926                ((u32 *)&stats_flags)[0]);
4927         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4928                ((u32 *)&stats_flags)[1]);
4929
4930         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4931                ((u32 *)&stats_flags)[0]);
4932         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4933                ((u32 *)&stats_flags)[1]);
4934
4935         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4936                ((u32 *)&stats_flags)[0]);
4937         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4938                ((u32 *)&stats_flags)[1]);
4939
4940         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4941                ((u32 *)&stats_flags)[0]);
4942         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4943                ((u32 *)&stats_flags)[1]);
4944
4945         REG_WR(bp, BAR_XSTRORM_INTMEM +
4946                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4947                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4948         REG_WR(bp, BAR_XSTRORM_INTMEM +
4949                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4950                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4951
4952         REG_WR(bp, BAR_TSTRORM_INTMEM +
4953                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4954                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4955         REG_WR(bp, BAR_TSTRORM_INTMEM +
4956                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4957                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4958
4959         REG_WR(bp, BAR_USTRORM_INTMEM +
4960                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4961                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4962         REG_WR(bp, BAR_USTRORM_INTMEM +
4963                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4964                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4965
4966         if (CHIP_IS_E1H(bp)) {
4967                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4968                         IS_E1HMF(bp));
4969                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4970                         IS_E1HMF(bp));
4971                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4972                         IS_E1HMF(bp));
4973                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4974                         IS_E1HMF(bp));
4975
4976                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4977                          bp->e1hov);
4978         }
4979
4980         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4981         max_agg_size =
4982                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4983                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4984                     (u32)0xffff);
4985         for_each_rx_queue(bp, i) {
4986                 struct bnx2x_fastpath *fp = &bp->fp[i];
4987
4988                 REG_WR(bp, BAR_USTRORM_INTMEM +
4989                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4990                        U64_LO(fp->rx_comp_mapping));
4991                 REG_WR(bp, BAR_USTRORM_INTMEM +
4992                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4993                        U64_HI(fp->rx_comp_mapping));
4994
4995                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4996                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4997                          max_agg_size);
4998         }
4999
5000         /* dropless flow control */
5001         if (CHIP_IS_E1H(bp)) {
5002                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5003
5004                 rx_pause.bd_thr_low = 250;
5005                 rx_pause.cqe_thr_low = 250;
5006                 rx_pause.cos = 1;
5007                 rx_pause.sge_thr_low = 0;
5008                 rx_pause.bd_thr_high = 350;
5009                 rx_pause.cqe_thr_high = 350;
5010                 rx_pause.sge_thr_high = 0;
5011
5012                 for_each_rx_queue(bp, i) {
5013                         struct bnx2x_fastpath *fp = &bp->fp[i];
5014
5015                         if (!fp->disable_tpa) {
5016                                 rx_pause.sge_thr_low = 150;
5017                                 rx_pause.sge_thr_high = 250;
5018                         }
5019
5020
5021                         offset = BAR_USTRORM_INTMEM +
5022                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5023                                                                    fp->cl_id);
5024                         for (j = 0;
5025                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5026                              j++)
5027                                 REG_WR(bp, offset + j*4,
5028                                        ((u32 *)&rx_pause)[j]);
5029                 }
5030         }
5031
5032         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5033
5034         /* Init rate shaping and fairness contexts */
5035         if (IS_E1HMF(bp)) {
5036                 int vn;
5037
5038                 /* During init there is no active link.
5039                    Until link is up, set the link rate to 10Gbps */
5040                 bp->link_vars.line_speed = SPEED_10000;
5041                 bnx2x_init_port_minmax(bp);
5042
5043                 bnx2x_calc_vn_weight_sum(bp);
5044
5045                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5046                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5047
5048                 /* Enable rate shaping and fairness */
5049                 bp->cmng.flags.cmng_enables =
5050                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5051                 if (bp->vn_weight_sum)
5052                         bp->cmng.flags.cmng_enables |=
5053                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5054                 else
5055                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5056                            "  fairness will be disabled\n");
5057         } else {
5058                 /* rate shaping and fairness are disabled */
5059                 DP(NETIF_MSG_IFUP,
5060                    "single function mode  minmax will be disabled\n");
5061         }
5062
5063
5064         /* Store it to internal memory */
5065         if (bp->port.pmf)
5066                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5067                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5068                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5069                                ((u32 *)(&bp->cmng))[i]);
5070 }
5071
5072 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5073 {
5074         switch (load_code) {
5075         case FW_MSG_CODE_DRV_LOAD_COMMON:
5076                 bnx2x_init_internal_common(bp);
5077                 /* no break */
5078
5079         case FW_MSG_CODE_DRV_LOAD_PORT:
5080                 bnx2x_init_internal_port(bp);
5081                 /* no break */
5082
5083         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5084                 bnx2x_init_internal_func(bp);
5085                 break;
5086
5087         default:
5088                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5089                 break;
5090         }
5091 }
5092
5093 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5094 {
5095         int i;
5096
5097         for_each_queue(bp, i) {
5098                 struct bnx2x_fastpath *fp = &bp->fp[i];
5099
5100                 fp->bp = bp;
5101                 fp->state = BNX2X_FP_STATE_CLOSED;
5102                 fp->index = i;
5103                 fp->cl_id = BP_L_ID(bp) + i;
5104                 fp->sb_id = fp->cl_id;
5105                 DP(NETIF_MSG_IFUP,
5106                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
5107                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5108                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5109                               FP_SB_ID(fp));
5110                 bnx2x_update_fpsb_idx(fp);
5111         }
5112
5113         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5114                           DEF_SB_ID);
5115         bnx2x_update_dsb_idx(bp);
5116         bnx2x_update_coalesce(bp);
5117         bnx2x_init_rx_rings(bp);
5118         bnx2x_init_tx_ring(bp);
5119         bnx2x_init_sp_ring(bp);
5120         bnx2x_init_context(bp);
5121         bnx2x_init_internal(bp, load_code);
5122         bnx2x_init_ind_table(bp);
5123         bnx2x_stats_init(bp);
5124
5125         /* At this point, we are ready for interrupts */
5126         atomic_set(&bp->intr_sem, 0);
5127
5128         /* flush all before enabling interrupts */
5129         mb();
5130         mmiowb();
5131
5132         bnx2x_int_enable(bp);
5133 }
5134
5135 /* end of nic init */
5136
5137 /*
5138  * gzip service functions
5139  */
5140
5141 static int bnx2x_gunzip_init(struct bnx2x *bp)
5142 {
5143         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5144                                               &bp->gunzip_mapping);
5145         if (bp->gunzip_buf == NULL)
5146                 goto gunzip_nomem1;
5147
5148         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5149         if (bp->strm == NULL)
5150                 goto gunzip_nomem2;
5151
5152         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5153                                       GFP_KERNEL);
5154         if (bp->strm->workspace == NULL)
5155                 goto gunzip_nomem3;
5156
5157         return 0;
5158
5159 gunzip_nomem3:
5160         kfree(bp->strm);
5161         bp->strm = NULL;
5162
5163 gunzip_nomem2:
5164         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5165                             bp->gunzip_mapping);
5166         bp->gunzip_buf = NULL;
5167
5168 gunzip_nomem1:
5169         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5170                " decompression\n", bp->dev->name);
5171         return -ENOMEM;
5172 }
5173
5174 static void bnx2x_gunzip_end(struct bnx2x *bp)
5175 {
5176         kfree(bp->strm->workspace);
5177
5178         kfree(bp->strm);
5179         bp->strm = NULL;
5180
5181         if (bp->gunzip_buf) {
5182                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5183                                     bp->gunzip_mapping);
5184                 bp->gunzip_buf = NULL;
5185         }
5186 }
5187
5188 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5189 {
5190         int n, rc;
5191
5192         /* check gzip header */
5193         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5194                 return -EINVAL;
5195
5196         n = 10;
5197
5198 #define FNAME                           0x8
5199
5200         if (zbuf[3] & FNAME)
5201                 while ((n < len) && (zbuf[n++] != 0));
5202
5203         bp->strm->next_in = zbuf + n;
5204         bp->strm->avail_in = len - n;
5205         bp->strm->next_out = bp->gunzip_buf;
5206         bp->strm->avail_out = FW_BUF_SIZE;
5207
5208         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5209         if (rc != Z_OK)
5210                 return rc;
5211
5212         rc = zlib_inflate(bp->strm, Z_FINISH);
5213         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5214                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5215                        bp->dev->name, bp->strm->msg);
5216
5217         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5218         if (bp->gunzip_outlen & 0x3)
5219                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5220                                     " gunzip_outlen (%d) not aligned\n",
5221                        bp->dev->name, bp->gunzip_outlen);
5222         bp->gunzip_outlen >>= 2;
5223
5224         zlib_inflateEnd(bp->strm);
5225
5226         if (rc == Z_STREAM_END)
5227                 return 0;
5228
5229         return rc;
5230 }
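
/* Editor's sketch: bnx2x_gunzip() above hand-parses the gzip wrapper
 * (magic 0x1f 0x8b, deflate method, optional NUL-terminated FNAME field)
 * and then inflates the raw stream - the negative -MAX_WBITS windowBits
 * tells zlib to expect a headerless deflate stream.  A standalone
 * header-skip equivalent (the ex_*/EX_* names are hypothetical):
 */
#if 0   /* illustrative example only, not compiled */
#define EX_GZ_FNAME     0x8

/* Returns the offset of the deflate payload, or -1 if not gzip/deflate. */
static int ex_gzip_payload_offset(const unsigned char *buf, int len)
{
        int n = 10;                     /* fixed-size gzip header */

        if (len < n || buf[0] != 0x1f || buf[1] != 0x8b || buf[2] != 8)
                return -1;
        if (buf[3] & EX_GZ_FNAME)       /* skip the original file name */
                while (n < len && buf[n++] != 0)
                        ;
        return n;
}
#endif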
5231
5232 /* nic load/unload */
5233
5234 /*
5235  * General service functions
5236  */
5237
5238 /* send a NIG loopback debug packet */
5239 static void bnx2x_lb_pckt(struct bnx2x *bp)
5240 {
5241         u32 wb_write[3];
5242
5243         /* Ethernet source and destination addresses */
5244         wb_write[0] = 0x55555555;
5245         wb_write[1] = 0x55555555;
5246         wb_write[2] = 0x20;             /* SOP */
5247         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5248
5249         /* NON-IP protocol */
5250         wb_write[0] = 0x09000000;
5251         wb_write[1] = 0x55555555;
5252         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5253         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5254 }
5255
5256 /* Some of the internal memories
5257  * are not directly readable from the driver;
5258  * to test them we send debug packets.
5259  */
5260 static int bnx2x_int_mem_test(struct bnx2x *bp)
5261 {
5262         int factor;
5263         int count, i;
5264         u32 val = 0;
5265
5266         if (CHIP_REV_IS_FPGA(bp))
5267                 factor = 120;
5268         else if (CHIP_REV_IS_EMUL(bp))
5269                 factor = 200;
5270         else
5271                 factor = 1;
5272
5273         DP(NETIF_MSG_HW, "start part1\n");
5274
5275         /* Disable inputs of parser neighbor blocks */
5276         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5277         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5278         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5279         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5280
5281         /* Write 0 to parser credits for CFC search request */
5282         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5283
5284         /* send Ethernet packet */
5285         bnx2x_lb_pckt(bp);
5286
5287         /* TODO: do we need to reset the NIG statistics? */
5288         /* Wait until NIG register shows 1 packet of size 0x10 */
5289         count = 1000 * factor;
5290         while (count) {
5291
5292                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5293                 val = *bnx2x_sp(bp, wb_data[0]);
5294                 if (val == 0x10)
5295                         break;
5296
5297                 msleep(10);
5298                 count--;
5299         }
5300         if (val != 0x10) {
5301                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5302                 return -1;
5303         }
5304
5305         /* Wait until PRS register shows 1 packet */
5306         count = 1000 * factor;
5307         while (count) {
5308                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5309                 if (val == 1)
5310                         break;
5311
5312                 msleep(10);
5313                 count--;
5314         }
5315         if (val != 0x1) {
5316                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5317                 return -2;
5318         }
5319
5320         /* Reset and init BRB, PRS */
5321         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5322         msleep(50);
5323         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5324         msleep(50);
5325         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5326         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5327
5328         DP(NETIF_MSG_HW, "part2\n");
5329
5330         /* Disable inputs of parser neighbor blocks */
5331         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5332         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5333         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5334         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5335
5336         /* Write 0 to parser credits for CFC search request */
5337         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5338
5339         /* send 10 Ethernet packets */
5340         for (i = 0; i < 10; i++)
5341                 bnx2x_lb_pckt(bp);
5342
5343         /* Wait until NIG register shows 10 + 1
5344            packets of size 11*0x10 = 0xb0 */
5345         count = 1000 * factor;
5346         while (count) {
5347
5348                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5349                 val = *bnx2x_sp(bp, wb_data[0]);
5350                 if (val == 0xb0)
5351                         break;
5352
5353                 msleep(10);
5354                 count--;
5355         }
5356         if (val != 0xb0) {
5357                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5358                 return -3;
5359         }
5360
5361         /* Wait until PRS register shows 2 packets */
5362         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5363         if (val != 2)
5364                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5365
5366         /* Write 1 to parser credits for CFC search request */
5367         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5368
5369         /* Wait until PRS register shows 3 packets */
5370         msleep(10 * factor);
5372         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5373         if (val != 3)
5374                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5375
5376         /* clear NIG EOP FIFO */
5377         for (i = 0; i < 11; i++)
5378                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5379         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5380         if (val != 1) {
5381                 BNX2X_ERR("clear of NIG failed\n");
5382                 return -4;
5383         }
5384
5385         /* Reset and init BRB, PRS, NIG */
5386         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5387         msleep(50);
5388         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5389         msleep(50);
5390         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5391         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5392 #ifndef BCM_ISCSI
5393         /* set NIC mode */
5394         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5395 #endif
5396
5397         /* Enable inputs of parser neighbor blocks */
5398         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5399         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5400         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5401         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5402
5403         DP(NETIF_MSG_HW, "done\n");
5404
5405         return 0; /* OK */
5406 }
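/* The NIG byte-count waits above cannot use reg_poll() (used later for
 * CFC init) because NIG_REG_STAT2_BRB_OCTET is a wide register read via
 * DMAE.  A hypothetical helper capturing that wait pattern:
 */
#if 0
static int bnx2x_poll_dmae_reg(struct bnx2x *bp, u32 reg, u32 expected,
                               int count)
{
        u32 val = 0;

        while (count--) {
                bnx2x_read_dmae(bp, reg, 2);
                val = *bnx2x_sp(bp, wb_data[0]);
                if (val == expected)
                        return 0;
                msleep(10);
        }
        BNX2X_ERR("poll timeout  reg 0x%x  val 0x%x\n", reg, val);
        return -ETIMEDOUT;
}
#endif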
5407
5408 static void enable_blocks_attention(struct bnx2x *bp)
5409 {
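        /* a 0 bit in these *_INT_MASK registers unmasks (enables) the
         * attention -- compare the "enable hw interrupt" writes of 0 in
         * bnx2x_init_common() below; only PBF keeps bits 3,4 masked
         */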
5410         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5411         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5412         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5413         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5414         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5415         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5416         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5417         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5418         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5419 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5420 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5421         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5422         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5423         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5424 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5425 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5426         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5427         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5428         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5429         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5430 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5431 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5432         if (CHIP_REV_IS_FPGA(bp))
5433                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5434         else
5435                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5436         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5437         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5438         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5439 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5440 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5441         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5442         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5443 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5444         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5445 }
5446
5447
5448 static void bnx2x_reset_common(struct bnx2x *bp)
5449 {
5450         /* reset_common */
5451         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5452                0xd3ffff7f);
5453         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5454 }
5455
5456 static int bnx2x_init_common(struct bnx2x *bp)
5457 {
5458         u32 val, i;
5459
5460         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5461
5462         bnx2x_reset_common(bp);
5463         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5464         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5465
5466         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5467         if (CHIP_IS_E1H(bp))
5468                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5469
5470         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5471         msleep(30);
5472         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5473
5474         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5475         if (CHIP_IS_E1(bp)) {
5476                 /* enable HW interrupt from PXP on USDM overflow
5477                    bit 16 on INT_MASK_0 */
5478                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5479         }
5480
5481         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5482         bnx2x_init_pxp(bp);
5483
5484 #ifdef __BIG_ENDIAN
5485         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5486         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5487         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5488         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5489         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5490         /* make sure this value is 0 */
5491         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5492
5493 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5494         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5495         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5496         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5497         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5498 #endif
5499
5500         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5501 #ifdef BCM_ISCSI
5502         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5503         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5504         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5505 #endif
5506
5507         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5508                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5509
5510         /* let the HW do its magic ... */
5511         msleep(100);
5512         /* finish PXP init */
5513         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5514         if (val != 1) {
5515                 BNX2X_ERR("PXP2 CFG failed\n");
5516                 return -EBUSY;
5517         }
5518         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5519         if (val != 1) {
5520                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5521                 return -EBUSY;
5522         }
5523
5524         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5525         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5526
5527         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5528
5529         /* clean the DMAE memory */
5530         bp->dmae_ready = 1;
5531         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5532
5533         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5534         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5535         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5536         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5537
5538         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5539         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5540         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5541         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5542
5543         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5544         /* soft reset pulse */
5545         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5546         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5547
5548 #ifdef BCM_ISCSI
5549         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5550 #endif
5551
5552         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5553         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5554         if (!CHIP_REV_IS_SLOW(bp)) {
5555                 /* enable hw interrupt from doorbell Q */
5556                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5557         }
5558
5559         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5560         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5561         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5562         /* set NIC mode */
5563         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5564         if (CHIP_IS_E1H(bp))
5565                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5566
5567         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5568         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5569         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5570         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5571
5572         if (CHIP_IS_E1H(bp)) {
5573                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5574                                 STORM_INTMEM_SIZE_E1H/2);
5575                 bnx2x_init_fill(bp,
5576                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5577                                 0, STORM_INTMEM_SIZE_E1H/2);
5578                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5579                                 STORM_INTMEM_SIZE_E1H/2);
5580                 bnx2x_init_fill(bp,
5581                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5582                                 0, STORM_INTMEM_SIZE_E1H/2);
5583                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5584                                 STORM_INTMEM_SIZE_E1H/2);
5585                 bnx2x_init_fill(bp,
5586                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5587                                 0, STORM_INTMEM_SIZE_E1H/2);
5588                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5589                                 STORM_INTMEM_SIZE_E1H/2);
5590                 bnx2x_init_fill(bp,
5591                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5592                                 0, STORM_INTMEM_SIZE_E1H/2);
5593         } else { /* E1 */
5594                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5595                                 STORM_INTMEM_SIZE_E1);
5596                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5597                                 STORM_INTMEM_SIZE_E1);
5598                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5599                                 STORM_INTMEM_SIZE_E1);
5600                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5601                                 STORM_INTMEM_SIZE_E1);
5602         }
5603
5604         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5605         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5606         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5607         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5608
5609         /* sync semi rtc */
5610         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5611                0x80000000);
5612         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5613                0x80000000);
5614
5615         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5616         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5617         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5618
5619         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5620         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5621                 REG_WR(bp, i, 0xc0cac01a);
5622                 /* TODO: replace with something meaningful */
5623         }
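                /* a less arbitrary key could be generated once at init;
                 * a sketch (assumes <linux/random.h>):
                 */
#if 0
                u32 rss_key[10];

                get_random_bytes(rss_key, sizeof(rss_key));
                for (i = 0; i < 10; i++)
                        REG_WR(bp, SRC_REG_KEYRSS0_0 + i*4, rss_key[i]);
#endif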
5624         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5625         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5626
5627         if (sizeof(union cdu_context) != 1024)
5628                 /* we currently assume that a context is 1024 bytes */
5629                 printk(KERN_ALERT PFX "please adjust the size of"
5630                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5631
5632         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5633         val = (4 << 24) + (0 << 12) + 1024;
5634         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5635         if (CHIP_IS_E1(bp)) {
5636                 /* !!! fix pxp client credit until excel update */
5637                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5638                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5639         }
5640
5641         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5642         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5643         /* enable context validation interrupt from CFC */
5644         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5645
5646         /* set the thresholds to prevent CFC/CDU race */
5647         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5648
5649         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5650         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5651
5652         /* PXPCS COMMON comes here */
5653         /* Reset PCIE errors for debug */
5654         REG_WR(bp, 0x2814, 0xffffffff);
5655         REG_WR(bp, 0x3820, 0xffffffff);
5656
5657         /* EMAC0 COMMON comes here */
5658         /* EMAC1 COMMON comes here */
5659         /* DBU COMMON comes here */
5660         /* DBG COMMON comes here */
5661
5662         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5663         if (CHIP_IS_E1H(bp)) {
5664                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5665                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5666         }
5667
5668         if (CHIP_REV_IS_SLOW(bp))
5669                 msleep(200);
5670
5671         /* finish CFC init */
5672         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5673         if (val != 1) {
5674                 BNX2X_ERR("CFC LL_INIT failed\n");
5675                 return -EBUSY;
5676         }
5677         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5678         if (val != 1) {
5679                 BNX2X_ERR("CFC AC_INIT failed\n");
5680                 return -EBUSY;
5681         }
5682         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5683         if (val != 1) {
5684                 BNX2X_ERR("CFC CAM_INIT failed\n");
5685                 return -EBUSY;
5686         }
5687         REG_WR(bp, CFC_REG_DEBUG0, 0);
5688
5689         /* read NIG statistic
5690            to see if this is our first up since powerup */
5691         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5692         val = *bnx2x_sp(bp, wb_data[0]);
5693
5694         /* do internal memory self test */
5695         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5696                 BNX2X_ERR("internal mem self test failed\n");
5697                 return -EBUSY;
5698         }
5699
5700         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5701         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5702                 /* Fan failure is indicated by SPIO 5 */
5703                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5704                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5705
5706                 /* set to active low mode */
5707                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5708                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5709                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5710                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5711
5712                 /* enable interrupt to signal the IGU */
5713                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5714                 val |= (1 << MISC_REGISTERS_SPIO_5);
5715                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5716                 break;
5717
5718         default:
5719                 break;
5720         }
5721
5722         /* clear PXP2 attentions */
5723         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5724
5725         enable_blocks_attention(bp);
5726
5727         if (!BP_NOMCP(bp)) {
5728                 bnx2x_acquire_phy_lock(bp);
5729                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5730                 bnx2x_release_phy_lock(bp);
5731         } else
5732                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5733
5734         return 0;
5735 }
5736
5737 static int bnx2x_init_port(struct bnx2x *bp)
5738 {
5739         int port = BP_PORT(bp);
5740         u32 low, high;
5741         u32 val;
#ifdef BCM_ISCSI
        /* the BCM_ISCSI blocks below assume these; i indexes the per-port
         * ILT lines (1..3 for port0, 385..387 for port1)
         */
        u32 wb_write[2];
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
#endif
5742
5743         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5744
5745         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5746
5747         /* Port PXP comes here */
5748         /* Port PXP2 comes here */
5749 #ifdef BCM_ISCSI
5750         /* Port0  1
5751          * Port1  385 */
5752         i++;
5753         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5754         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5755         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5756         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5757
5758         /* Port0  2
5759          * Port1  386 */
5760         i++;
5761         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5762         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5763         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5764         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5765
5766         /* Port0  3
5767          * Port1  387 */
5768         i++;
5769         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5770         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5771         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5772         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5773 #endif
5774         /* Port CMs come here */
5775         bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5776                              (port ? XCM_PORT1_END : XCM_PORT0_END));
5777
5778         /* Port QM comes here */
5779 #ifdef BCM_ISCSI
5780         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5781         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5782
5783         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5784                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5785 #endif
5786         /* Port DQ comes here */
5787
5788         bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5789                              (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5790         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5791                 /* no pause for emulation and FPGA */
5792                 low = 0;
5793                 high = 513;
5794         } else {
5795                 if (IS_E1HMF(bp))
5796                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5797                 else if (bp->dev->mtu > 4096) {
5798                         if (bp->flags & ONE_PORT_FLAG)
5799                                 low = 160;
5800                         else {
5801                                 val = bp->dev->mtu;
5802                                 /* (24*1024 + val*4)/256 */
5803                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5804                         }
5805                 } else
5806                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5807                 high = low + 56;        /* 14*1024/256 */
5808         }
5809         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5810         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5811
5812
5813         /* Port PRS comes here */
5814         /* Port TSDM comes here */
5815         /* Port CSDM comes here */
5816         /* Port USDM comes here */
5817         /* Port XSDM comes here */
5818         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5819                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5820         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5821                              port ? USEM_PORT1_END : USEM_PORT0_END);
5822         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5823                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5824         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5825                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5826         /* Port UPB comes here */
5827         /* Port XPB comes here */
5828
5829         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5830                              port ? PBF_PORT1_END : PBF_PORT0_END);
5831
5832         /* configure PBF to work without PAUSE mtu 9000 */
5833         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5834
5835         /* update threshold */
5836         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5837         /* update init credit */
5838         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5839
5840         /* probe changes */
5841         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5842         msleep(5);
5843         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5844
5845 #ifdef BCM_ISCSI
5846         /* tell the searcher where the T2 table is */
5847         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5848
5849         wb_write[0] = U64_LO(bp->t2_mapping);
5850         wb_write[1] = U64_HI(bp->t2_mapping);
5851         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5852         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5853         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5854         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5855
5856         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5857         /* Port SRCH comes here */
5858 #endif
5859         /* Port CDU comes here */
5860         /* Port CFC comes here */
5861
5862         if (CHIP_IS_E1(bp)) {
5863                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5864                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5865         }
5866         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5867                              port ? HC_PORT1_END : HC_PORT0_END);
5868
5869         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5870                                     MISC_AEU_PORT0_START,
5871                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5872         /* init aeu_mask_attn_func_0/1:
5873          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5874          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5875          *             bits 4-7 are used for "per vn group attention" */
5876         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5877                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5878
5879         /* Port PXPCS comes here */
5880         /* Port EMAC0 comes here */
5881         /* Port EMAC1 comes here */
5882         /* Port DBU comes here */
5883         /* Port DBG comes here */
5884         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5885                              port ? NIG_PORT1_END : NIG_PORT0_END);
5886
5887         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5888
5889         if (CHIP_IS_E1H(bp)) {
5890                 /* 0x2 disable e1hov, 0x1 enable */
5891                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5892                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5893
5894                 /* support pause requests from USDM, TSDM and BRB */
5895                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5896
5897                 {
5898                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5899                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5900                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5901                 }
5902         }
5903
5904         /* Port MCP comes here */
5905         /* Port DMAE comes here */
5906
5907         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5908         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5909                 /* add SPIO 5 to group 0 */
5910                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5911                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5912                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5913                 break;
5914
5915         default:
5916                 break;
5917         }
5918
5919         bnx2x__link_reset(bp);
5920
5921         return 0;
5922 }
5923
5924 #define ILT_PER_FUNC            (768/2)
5925 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5926 /* the phys address is shifted right 12 bits and a 1=valid bit is
5927    set at bit 52 (the 53rd bit);
5928    then since this is a wide register(TM)
5929    we split it into two 32 bit writes
5930  */
5931 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5932 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
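/* worked example: for a DMA address of 0x0000001234567000,
 * ONCHIP_ADDR1() yields 0x01234567 (low word of the page-shifted
 * address) and ONCHIP_ADDR2() yields 0x00100000 (just the valid bit,
 * since the upper bits of this address are zero)
 */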
5933 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
5934 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
5935
5936 #define CNIC_ILT_LINES          0
5937
5938 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5939 {
5940         int reg;
5941
5942         if (CHIP_IS_E1H(bp))
5943                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5944         else /* E1 */
5945                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5946
5947         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5948 }
5949
5950 static int bnx2x_init_func(struct bnx2x *bp)
5951 {
5952         int port = BP_PORT(bp);
5953         int func = BP_FUNC(bp);
5954         u32 addr, val;
5955         int i;
5956
5957         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5958
5959         /* set MSI reconfigure capability */
5960         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5961         val = REG_RD(bp, addr);
5962         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5963         REG_WR(bp, addr, val);
5964
5965         i = FUNC_ILT_BASE(func);
5966
5967         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5968         if (CHIP_IS_E1H(bp)) {
5969                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5970                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5971         } else /* E1 */
5972                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5973                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5974
5975
5976         if (CHIP_IS_E1H(bp)) {
5977                 for (i = 0; i < 9; i++)
5978                         bnx2x_init_block(bp,
5979                                          cm_start[func][i], cm_end[func][i]);
5980
5981                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5982                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5983         }
5984
5985         /* HC init per function */
5986         if (CHIP_IS_E1H(bp)) {
5987                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5988
5989                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5990                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5991         }
5992         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5993
5994         /* Reset PCIE errors for debug */
5995         REG_WR(bp, 0x2114, 0xffffffff);
5996         REG_WR(bp, 0x2120, 0xffffffff);
5997
5998         return 0;
5999 }
6000
6001 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6002 {
6003         int i, rc = 0;
6004
6005         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6006            BP_FUNC(bp), load_code);
6007
6008         bp->dmae_ready = 0;
6009         mutex_init(&bp->dmae_mutex);
6010         bnx2x_gunzip_init(bp);
6011
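        /* the switch below falls through on purpose: a COMMON load also
         * performs the PORT and FUNCTION stages, and a PORT load also
         * performs the FUNCTION stage
         */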
6012         switch (load_code) {
6013         case FW_MSG_CODE_DRV_LOAD_COMMON:
6014                 rc = bnx2x_init_common(bp);
6015                 if (rc)
6016                         goto init_hw_err;
6017                 /* no break */
6018
6019         case FW_MSG_CODE_DRV_LOAD_PORT:
6020                 bp->dmae_ready = 1;
6021                 rc = bnx2x_init_port(bp);
6022                 if (rc)
6023                         goto init_hw_err;
6024                 /* no break */
6025
6026         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6027                 bp->dmae_ready = 1;
6028                 rc = bnx2x_init_func(bp);
6029                 if (rc)
6030                         goto init_hw_err;
6031                 break;
6032
6033         default:
6034                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6035                 break;
6036         }
6037
6038         if (!BP_NOMCP(bp)) {
6039                 int func = BP_FUNC(bp);
6040
6041                 bp->fw_drv_pulse_wr_seq =
6042                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6043                                  DRV_PULSE_SEQ_MASK);
6044                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6045                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6046                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6047         } else
6048                 bp->func_stx = 0;
6049
6050         /* this needs to be done before gunzip end */
6051         bnx2x_zero_def_sb(bp);
6052         for_each_queue(bp, i)
6053                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6054
6055 init_hw_err:
6056         bnx2x_gunzip_end(bp);
6057
6058         return rc;
6059 }
6060
6061 /* send the MCP a request, block until there is a reply */
6062 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6063 {
6064         int func = BP_FUNC(bp);
6065         u32 seq = ++bp->fw_seq;
6066         u32 rc = 0;
6067         u32 cnt = 1;
6068         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6069
6070         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6071         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6072
6073         do {
6074                 /* let the FW do its magic ... */
6075                 msleep(delay);
6076
6077                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6078
6079                 /* Give the FW up to 2 seconds (200 * 10ms) */
6080         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6081
6082         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6083            cnt*delay, rc, seq);
6084
6085         /* is this a reply to our command? */
6086         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6087                 rc &= FW_MSG_CODE_MASK;
6088
6089         } else {
6090                 /* FW BUG! */
6091                 BNX2X_ERR("FW failed to respond!\n");
6092                 bnx2x_fw_dump(bp);
6093                 rc = 0;
6094         }
6095
6096         return rc;
6097 }
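/* Usage sketch: requesting a load from the MCP.  DRV_MSG_CODE_LOAD_REQ
 * is assumed here as the request code whose FW_MSG_CODE_DRV_LOAD_*
 * replies are dispatched in bnx2x_init_hw() above.
 */
#if 0
        load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
        if (!load_code) {
                BNX2X_ERR("MCP response failure, aborting\n");
                return -EBUSY;
        }
#endif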
6098
6099 static void bnx2x_free_mem(struct bnx2x *bp)
6100 {
6101
6102 #define BNX2X_PCI_FREE(x, y, size) \
6103         do { \
6104                 if (x) { \
6105                         pci_free_consistent(bp->pdev, size, x, y); \
6106                         x = NULL; \
6107                         y = 0; \
6108                 } \
6109         } while (0)
6110
6111 #define BNX2X_FREE(x) \
6112         do { \
6113                 if (x) { \
6114                         vfree(x); \
6115                         x = NULL; \
6116                 } \
6117         } while (0)
6118
6119         int i;
6120
6121         /* fastpath */
6122         /* Common */
6123         for_each_queue(bp, i) {
6124
6125                 /* status blocks */
6126                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6127                                bnx2x_fp(bp, i, status_blk_mapping),
6128                                sizeof(struct host_status_block) +
6129                                sizeof(struct eth_tx_db_data));
6130         }
6131         /* Rx */
6132         for_each_rx_queue(bp, i) {
6133
6134                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6135                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6136                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6137                                bnx2x_fp(bp, i, rx_desc_mapping),
6138                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6139
6140                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6141                                bnx2x_fp(bp, i, rx_comp_mapping),
6142                                sizeof(struct eth_fast_path_rx_cqe) *
6143                                NUM_RCQ_BD);
6144
6145                 /* SGE ring */
6146                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6147                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6148                                bnx2x_fp(bp, i, rx_sge_mapping),
6149                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6150         }
6151         /* Tx */
6152         for_each_tx_queue(bp, i) {
6153
6154                 /* fastpath tx rings: tx_buf tx_desc */
6155                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6156                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6157                                bnx2x_fp(bp, i, tx_desc_mapping),
6158                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6159         }
6160         /* end of fastpath */
6161
6162         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6163                        sizeof(struct host_def_status_block));
6164
6165         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6166                        sizeof(struct bnx2x_slowpath));
6167
6168 #ifdef BCM_ISCSI
6169         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6170         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6171         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6172         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6173 #endif
6174         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6175
6176 #undef BNX2X_PCI_FREE
6177 #undef BNX2X_FREE
6178 }
6179
6180 static int bnx2x_alloc_mem(struct bnx2x *bp)
6181 {
6182
6183 #define BNX2X_PCI_ALLOC(x, y, size) \
6184         do { \
6185                 x = pci_alloc_consistent(bp->pdev, size, y); \
6186                 if (x == NULL) \
6187                         goto alloc_mem_err; \
6188                 memset(x, 0, size); \
6189         } while (0)
6190
6191 #define BNX2X_ALLOC(x, size) \
6192         do { \
6193                 x = vmalloc(size); \
6194                 if (x == NULL) \
6195                         goto alloc_mem_err; \
6196                 memset(x, 0, size); \
6197         } while (0)
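        /* on any allocation failure these macros jump to alloc_mem_err,
         * which calls bnx2x_free_mem(); since the free macros NULL each
         * pointer, unwinding a partially completed allocation is safe
         */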
6198
6199         int i;
6200
6201         /* fastpath */
6202         /* Common */
6203         for_each_queue(bp, i) {
6204                 bnx2x_fp(bp, i, bp) = bp;
6205
6206                 /* status blocks */
6207                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6208                                 &bnx2x_fp(bp, i, status_blk_mapping),
6209                                 sizeof(struct host_status_block) +
6210                                 sizeof(struct eth_tx_db_data));
6211         }
6212         /* Rx */
6213         for_each_rx_queue(bp, i) {
6214
6215                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6216                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6217                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6218                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6219                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6220                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6221
6222                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6223                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6224                                 sizeof(struct eth_fast_path_rx_cqe) *
6225                                 NUM_RCQ_BD);
6226
6227                 /* SGE ring */
6228                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6229                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6230                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6231                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6232                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6233         }
6234         /* Tx */
6235         for_each_tx_queue(bp, i) {
6236
6237                 bnx2x_fp(bp, i, hw_tx_prods) =
6238                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6239
6240                 bnx2x_fp(bp, i, tx_prods_mapping) =
6241                                 bnx2x_fp(bp, i, status_blk_mapping) +
6242                                 sizeof(struct host_status_block);
6243
6244                 /* fastpath tx rings: tx_buf tx_desc */
6245                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6246                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6247                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6248                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6249                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6250         }
6251         /* end of fastpath */
6252
6253         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6254                         sizeof(struct host_def_status_block));
6255
6256         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6257                         sizeof(struct bnx2x_slowpath));
6258
6259 #ifdef BCM_ISCSI
6260         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6261
6262         /* Initialize T1 */
6263         for (i = 0; i < 64*1024; i += 64) {
6264                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6265                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6266         }
6267
6268         /* allocate searcher T2 table;
6269            we allocate 1/4 of the T1 size for T2
6270           (T2 is not entered into the ILT) */
6271         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6272
6273         /* Initialize T2 */
6274         for (i = 0; i < 16*1024; i += 64)
6275                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6276
6277         /* now fixup the last line in the block to point to the next block */
6278         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6279
6280         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6281         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6282
6283         /* QM queues (128*MAX_CONN) */
6284         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6285 #endif
6286
6287         /* Slow path ring */
6288         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6289
6290         return 0;
6291
6292 alloc_mem_err:
6293         bnx2x_free_mem(bp);
6294         return -ENOMEM;
6295
6296 #undef BNX2X_PCI_ALLOC
6297 #undef BNX2X_ALLOC
6298 }
6299
6300 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6301 {
6302         int i;
6303
6304         for_each_tx_queue(bp, i) {
6305                 struct bnx2x_fastpath *fp = &bp->fp[i];
6306
6307                 u16 bd_cons = fp->tx_bd_cons;
6308                 u16 sw_prod = fp->tx_pkt_prod;
6309                 u16 sw_cons = fp->tx_pkt_cons;
6310
6311                 while (sw_cons != sw_prod) {
6312                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6313                         sw_cons++;
6314                 }
6315         }
6316 }
6317
6318 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6319 {
6320         int i, j;
6321
6322         for_each_rx_queue(bp, j) {
6323                 struct bnx2x_fastpath *fp = &bp->fp[j];
6324
6325                 for (i = 0; i < NUM_RX_BD; i++) {
6326                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6327                         struct sk_buff *skb = rx_buf->skb;
6328
6329                         if (skb == NULL)
6330                                 continue;
6331
6332                         pci_unmap_single(bp->pdev,
6333                                          pci_unmap_addr(rx_buf, mapping),
6334                                          bp->rx_buf_size,
6335                                          PCI_DMA_FROMDEVICE);
6336
6337                         rx_buf->skb = NULL;
6338                         dev_kfree_skb(skb);
6339                 }
6340                 if (!fp->disable_tpa)
6341                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6342                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6343                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6344         }
6345 }
6346
6347 static void bnx2x_free_skbs(struct bnx2x *bp)
6348 {
6349         bnx2x_free_tx_skbs(bp);
6350         bnx2x_free_rx_skbs(bp);
6351 }
6352
6353 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6354 {
6355         int i, offset = 1;
6356
6357         free_irq(bp->msix_table[0].vector, bp->dev);
6358         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6359            bp->msix_table[0].vector);
6360
6361         for_each_queue(bp, i) {
6362                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6363                    "state %x\n", i, bp->msix_table[i + offset].vector,
6364                    bnx2x_fp(bp, i, state));
6365
6366                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6367         }
6368 }
6369
6370 static void bnx2x_free_irq(struct bnx2x *bp)
6371 {
6372         if (bp->flags & USING_MSIX_FLAG) {
6373                 bnx2x_free_msix_irqs(bp);
6374                 pci_disable_msix(bp->pdev);
6375                 bp->flags &= ~USING_MSIX_FLAG;
6376
6377         } else if (bp->flags & USING_MSI_FLAG) {
6378                 free_irq(bp->pdev->irq, bp->dev);
6379                 pci_disable_msi(bp->pdev);
6380                 bp->flags &= ~USING_MSI_FLAG;
6381
6382         } else
6383                 free_irq(bp->pdev->irq, bp->dev);
6384 }
6385
6386 static int bnx2x_enable_msix(struct bnx2x *bp)
6387 {
6388         int i, rc, offset = 1;
6389         int igu_vec = 0;
6390
6391         bp->msix_table[0].entry = igu_vec;
6392         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6393
6394         for_each_queue(bp, i) {
6395                 igu_vec = BP_L_ID(bp) + offset + i;
6396                 bp->msix_table[i + offset].entry = igu_vec;
6397                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6398                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6399         }
6400
6401         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6402                              BNX2X_NUM_QUEUES(bp) + offset);
6403         if (rc) {
6404                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6405                 return rc;
6406         }
6407
6408         bp->flags |= USING_MSIX_FLAG;
6409
6410         return 0;
6411 }
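/* Note: pci_enable_msix() can also return a positive count meaning only
 * that many vectors are available; a caller willing to shrink its queue
 * count could retry with the returned value, e.g. (sketch, nvecs is
 * hypothetical):
 */
#if 0
        rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], nvecs);
        if (rc > 0)
                rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
#endif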
6412
6413 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6414 {
6415         int i, rc, offset = 1;
6416
6417         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6418                          bp->dev->name, bp->dev);
6419         if (rc) {
6420                 BNX2X_ERR("request sp irq failed\n");
6421                 return -EBUSY;
6422         }
6423
6424         for_each_queue(bp, i) {
6425                 struct bnx2x_fastpath *fp = &bp->fp[i];
6426
6427                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6428                 rc = request_irq(bp->msix_table[i + offset].vector,
6429                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6430                 if (rc) {
6431                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6432                         bnx2x_free_msix_irqs(bp);
6433                         return -EBUSY;
6434                 }
6435
6436                 fp->state = BNX2X_FP_STATE_IRQ;
6437         }
6438
6439         i = BNX2X_NUM_QUEUES(bp);
6440         if (is_multi(bp))
6441                 printk(KERN_INFO PFX
6442                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6443                        bp->dev->name, bp->msix_table[0].vector,
6444                        bp->msix_table[offset].vector,
6445                        bp->msix_table[offset + i - 1].vector);
6446         else
6447                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6448                        bp->dev->name, bp->msix_table[0].vector,
6449                        bp->msix_table[offset + i - 1].vector);
6450
6451         return 0;
6452 }
6453
6454 static int bnx2x_enable_msi(struct bnx2x *bp)
6455 {
6456         int rc;
6457
6458         rc = pci_enable_msi(bp->pdev);
6459         if (rc) {
6460                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6461                 return -1;
6462         }
6463         bp->flags |= USING_MSI_FLAG;
6464
6465         return 0;
6466 }
6467
6468 static int bnx2x_req_irq(struct bnx2x *bp)
6469 {
6470         unsigned long flags;
6471         int rc;
6472
6473         if (bp->flags & USING_MSI_FLAG)
6474                 flags = 0;
6475         else
6476                 flags = IRQF_SHARED;
6477
6478         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6479                          bp->dev->name, bp->dev);
6480         if (!rc)
6481                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6482
6483         return rc;
6484 }
6485
6486 static void bnx2x_napi_enable(struct bnx2x *bp)
6487 {
6488         int i;
6489
6490         for_each_rx_queue(bp, i)
6491                 napi_enable(&bnx2x_fp(bp, i, napi));
6492 }
6493
6494 static void bnx2x_napi_disable(struct bnx2x *bp)
6495 {
6496         int i;
6497
6498         for_each_rx_queue(bp, i)
6499                 napi_disable(&bnx2x_fp(bp, i, napi));
6500 }
6501
6502 static void bnx2x_netif_start(struct bnx2x *bp)
6503 {
6504         if (atomic_dec_and_test(&bp->intr_sem)) {
6505                 if (netif_running(bp->dev)) {
6506                         bnx2x_napi_enable(bp);
6507                         bnx2x_int_enable(bp);
6508                         if (bp->state == BNX2X_STATE_OPEN)
6509                                 netif_tx_wake_all_queues(bp->dev);
6510                 }
6511         }
6512 }
6513
6514 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6515 {
6516         bnx2x_int_disable_sync(bp, disable_hw);
6517         bnx2x_napi_disable(bp);
6518         if (netif_running(bp->dev)) {
6519                 netif_tx_disable(bp->dev);
6520                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6521         }
6522 }
6523
6524 /*
6525  * Init service functions
6526  */
6527
6528 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6529 {
6530         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6531         int port = BP_PORT(bp);
6532
6533         /* CAM allocation
6534          * unicasts 0-31:port0 32-63:port1
6535          * multicast 64-127:port0 128-191:port1
6536          */
6537         config->hdr.length = 2;
6538         config->hdr.offset = port ? 32 : 0;
6539         config->hdr.client_id = BP_CL_ID(bp);
6540         config->hdr.reserved1 = 0;
6541
6542         /* primary MAC */
6543         config->config_table[0].cam_entry.msb_mac_addr =
6544                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6545         config->config_table[0].cam_entry.middle_mac_addr =
6546                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6547         config->config_table[0].cam_entry.lsb_mac_addr =
6548                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
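        /* e.g. on a little-endian host, dev_addr 00:10:18:aa:bb:cc gives
         * msb 0x0010, middle 0x18aa, lsb 0xbbcc -- the MAC in natural
         * reading order as three 16-bit words
         */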
6549         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6550         if (set)
6551                 config->config_table[0].target_table_entry.flags = 0;
6552         else
6553                 CAM_INVALIDATE(config->config_table[0]);
6554         config->config_table[0].target_table_entry.client_id = 0;
6555         config->config_table[0].target_table_entry.vlan_id = 0;
6556
6557         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6558            (set ? "setting" : "clearing"),
6559            config->config_table[0].cam_entry.msb_mac_addr,
6560            config->config_table[0].cam_entry.middle_mac_addr,
6561            config->config_table[0].cam_entry.lsb_mac_addr);
6562
6563         /* broadcast */
6564         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6565         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6566         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6567         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6568         if (set)
6569                 config->config_table[1].target_table_entry.flags =
6570                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6571         else
6572                 CAM_INVALIDATE(config->config_table[1]);
6573         config->config_table[1].target_table_entry.client_id = 0;
6574         config->config_table[1].target_table_entry.vlan_id = 0;
6575
6576         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6577                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6578                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6579 }
6580
6581 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6582 {
6583         struct mac_configuration_cmd_e1h *config =
6584                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6585
6586         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6587                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6588                 return;
6589         }
6590
6591         /* CAM allocation for E1H
6592          * unicasts: by func number
6593          * multicast: 20+FUNC*20, 20 each
6594          */
6595         config->hdr.length = 1;
6596         config->hdr.offset = BP_FUNC(bp);
6597         config->hdr.client_id = BP_CL_ID(bp);
6598         config->hdr.reserved1 = 0;
6599
6600         /* primary MAC */
6601         config->config_table[0].msb_mac_addr =
6602                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6603         config->config_table[0].middle_mac_addr =
6604                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6605         config->config_table[0].lsb_mac_addr =
6606                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6607         config->config_table[0].client_id = BP_L_ID(bp);
6608         config->config_table[0].vlan_id = 0;
6609         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6610         if (set)
6611                 config->config_table[0].flags = BP_PORT(bp);
6612         else
6613                 config->config_table[0].flags =
6614                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6615
6616         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6617            (set ? "setting" : "clearing"),
6618            config->config_table[0].msb_mac_addr,
6619            config->config_table[0].middle_mac_addr,
6620            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6621
6622         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6623                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6624                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6625 }
6626
6627 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6628                              int *state_p, int poll)
6629 {
6630         /* can take a while if any port is running */
6631         int cnt = 500;
6632
6633         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6634            poll ? "polling" : "waiting", state, idx);
6635
6636         might_sleep();
6637         while (cnt--) {
6638                 if (poll) {
6639                         bnx2x_rx_int(bp->fp, 10);
6640                         /* if index is different from 0
6641                          * the reply for some commands will
6642                          * be on the non default queue
6643                          */
6644                         if (idx)
6645                                 bnx2x_rx_int(&bp->fp[idx], 10);
6646                 }
6647
6648                 mb(); /* state is changed by bnx2x_sp_event() */
6649                 if (*state_p == state)
6650                         return 0;
6651
6652                 msleep(1);
6653         }
6654
6655         /* timeout! */
6656         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6657                   poll ? "polling" : "waiting", state, idx);
6658 #ifdef BNX2X_STOP_ON_ERROR
6659         bnx2x_panic();
6660 #endif
6661
6662         return -EBUSY;
6663 }
6664
6665 static int bnx2x_setup_leading(struct bnx2x *bp)
6666 {
6667         int rc;
6668
6669         /* reset IGU state */
6670         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6671
6672         /* SETUP ramrod */
6673         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6674
6675         /* Wait for completion */
6676         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6677
6678         return rc;
6679 }
6680
6681 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6682 {
6683         struct bnx2x_fastpath *fp = &bp->fp[index];
6684
6685         /* reset IGU state */
6686         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6687
6688         /* SETUP ramrod */
6689         fp->state = BNX2X_FP_STATE_OPENING;
6690         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6691                       fp->cl_id, 0);
6692
6693         /* Wait for completion */
6694         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6695                                  &(fp->state), 0);
6696 }
6697
6698 static int bnx2x_poll(struct napi_struct *napi, int budget);
6699
6700 static void bnx2x_set_int_mode(struct bnx2x *bp)
6701 {
6702         int num_queues;
6703
6704         switch (int_mode) {
6705         case INT_MODE_INTx:
6706         case INT_MODE_MSI:
6707                 num_queues = 1;
6708                 bp->num_rx_queues = num_queues;
6709                 bp->num_tx_queues = num_queues;
6710                 DP(NETIF_MSG_IFUP,
6711                    "set number of queues to %d\n", num_queues);
6712                 break;
6713
6714         case INT_MODE_MSIX:
6715         default:
6716                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6717                         num_queues = min_t(u32, num_online_cpus(),
6718                                            BNX2X_MAX_QUEUES(bp));
6719                 else
6720                         num_queues = 1;
6721                 bp->num_rx_queues = num_queues;
6722                 bp->num_tx_queues = num_queues;
6723                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6724                    "  number of tx queues to %d\n",
6725                    bp->num_rx_queues, bp->num_tx_queues);
6726                 /* try to enable MSI-X with the requested number of
6727                  * fp's; if that fails we only need one fp, so fall
6728                  * back to MSI or legacy INTx with a single fp
6729                  */
6730                 if (bnx2x_enable_msix(bp)) {
6731                         /* failed to enable MSI-X */
6732                         num_queues = 1;
6733                         bp->num_rx_queues = num_queues;
6734                         bp->num_tx_queues = num_queues;
6735                         if (bp->multi_mode)
6736                                 BNX2X_ERR("Multi requested but failed to "
6737                                           "enable MSI-X  set number of "
6738                                           "queues to %d\n", num_queues);
6739                 }
6740                 break;
6741         }
6742         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6743 }
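
/*
 * A minimal sketch (not part of the driver) of the queue-count policy
 * above: with MSI-X and regular RSS the driver takes one queue per online
 * CPU up to the hardware limit, while any other interrupt mode collapses
 * to a single queue.  All names and values below are illustrative.
 *
 *	#include <stdio.h>
 *
 *	static int pick_num_queues(int msix_ok, int rss_regular,
 *				   int online_cpus, int max_hw_queues)
 *	{
 *		if (!msix_ok || !rss_regular)
 *			return 1;
 *		return online_cpus < max_hw_queues ? online_cpus : max_hw_queues;
 *	}
 *
 *	int main(void)
 *	{
 *		printf("%d\n", pick_num_queues(1, 1, 8, 16));	// 8
 *		printf("%d\n", pick_num_queues(0, 1, 8, 16));	// 1
 *		return 0;
 *	}
 */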
6744
6745 static void bnx2x_set_rx_mode(struct net_device *dev);
6746
6747 /* must be called with rtnl_lock */
6748 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6749 {
6750         u32 load_code;
6751         int i, rc = 0;
6752 #ifdef BNX2X_STOP_ON_ERROR
6753         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6754         if (unlikely(bp->panic))
6755                 return -EPERM;
6756 #endif
6757
6758         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6759
6760         bnx2x_set_int_mode(bp);
6761
6762         if (bnx2x_alloc_mem(bp))
6763                 return -ENOMEM;
6764
6765         for_each_rx_queue(bp, i)
6766                 bnx2x_fp(bp, i, disable_tpa) =
6767                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6768
6769         for_each_rx_queue(bp, i)
6770                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6771                                bnx2x_poll, 128);
6772
6773 #ifdef BNX2X_STOP_ON_ERROR
6774         for_each_rx_queue(bp, i) {
6775                 struct bnx2x_fastpath *fp = &bp->fp[i];
6776
6777                 fp->poll_no_work = 0;
6778                 fp->poll_calls = 0;
6779                 fp->poll_max_calls = 0;
6780                 fp->poll_complete = 0;
6781                 fp->poll_exit = 0;
6782         }
6783 #endif
6784         bnx2x_napi_enable(bp);
6785
6786         if (bp->flags & USING_MSIX_FLAG) {
6787                 rc = bnx2x_req_msix_irqs(bp);
6788                 if (rc) {
6789                         pci_disable_msix(bp->pdev);
6790                         goto load_error1;
6791                 }
6792         } else {
6793                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6794                         bnx2x_enable_msi(bp);
6795                 bnx2x_ack_int(bp);
6796                 rc = bnx2x_req_irq(bp);
6797                 if (rc) {
6798                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6799                         if (bp->flags & USING_MSI_FLAG)
6800                                 pci_disable_msi(bp->pdev);
6801                         goto load_error1;
6802                 }
6803                 if (bp->flags & USING_MSI_FLAG) {
6804                         bp->dev->irq = bp->pdev->irq;
6805                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6806                                bp->dev->name, bp->pdev->irq);
6807                 }
6808         }
6809
6810         /* Send LOAD_REQUEST command to the MCP.
6811            The response indicates the type of LOAD to perform:
6812            if this is the first port to be initialized,
6813            the common blocks must be initialized as well; otherwise not
6814         */
6815         if (!BP_NOMCP(bp)) {
6816                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6817                 if (!load_code) {
6818                         BNX2X_ERR("MCP response failure, aborting\n");
6819                         rc = -EBUSY;
6820                         goto load_error2;
6821                 }
6822                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6823                         rc = -EBUSY; /* other port in diagnostic mode */
6824                         goto load_error2;
6825                 }
6826
6827         } else {
6828                 int port = BP_PORT(bp);
6829
6830                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6831                    load_count[0], load_count[1], load_count[2]);
6832                 load_count[0]++;
6833                 load_count[1 + port]++;
6834                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6835                    load_count[0], load_count[1], load_count[2]);
6836                 if (load_count[0] == 1)
6837                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6838                 else if (load_count[1 + port] == 1)
6839                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6840                 else
6841                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6842         }
6843
6844         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6845             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6846                 bp->port.pmf = 1;
6847         else
6848                 bp->port.pmf = 0;
6849         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6850
6851         /* Initialize HW */
6852         rc = bnx2x_init_hw(bp, load_code);
6853         if (rc) {
6854                 BNX2X_ERR("HW init failed, aborting\n");
6855                 goto load_error2;
6856         }
6857
6858         /* Setup NIC internals and enable interrupts */
6859         bnx2x_nic_init(bp, load_code);
6860
6861         /* Send LOAD_DONE command to MCP */
6862         if (!BP_NOMCP(bp)) {
6863                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6864                 if (!load_code) {
6865                         BNX2X_ERR("MCP response failure, aborting\n");
6866                         rc = -EBUSY;
6867                         goto load_error3;
6868                 }
6869         }
6870
6871         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6872
6873         rc = bnx2x_setup_leading(bp);
6874         if (rc) {
6875                 BNX2X_ERR("Setup leading failed!\n");
6876                 goto load_error3;
6877         }
6878
6879         if (CHIP_IS_E1H(bp))
6880                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6881                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6882                         bp->state = BNX2X_STATE_DISABLED;
6883                 }
6884
6885         if (bp->state == BNX2X_STATE_OPEN)
6886                 for_each_nondefault_queue(bp, i) {
6887                         rc = bnx2x_setup_multi(bp, i);
6888                         if (rc)
6889                                 goto load_error3;
6890                 }
6891
6892         if (CHIP_IS_E1(bp))
6893                 bnx2x_set_mac_addr_e1(bp, 1);
6894         else
6895                 bnx2x_set_mac_addr_e1h(bp, 1);
6896
6897         if (bp->port.pmf)
6898                 bnx2x_initial_phy_init(bp);
6899
6900         /* Start fast path */
6901         switch (load_mode) {
6902         case LOAD_NORMAL:
6903                 /* Tx queues only need to be re-enabled */
6904                 netif_tx_wake_all_queues(bp->dev);
6905                 /* Initialize the receive filter. */
6906                 bnx2x_set_rx_mode(bp->dev);
6907                 break;
6908
6909         case LOAD_OPEN:
6910                 netif_tx_start_all_queues(bp->dev);
6911                 /* Initialize the receive filter. */
6912                 bnx2x_set_rx_mode(bp->dev);
6913                 break;
6914
6915         case LOAD_DIAG:
6916                 /* Initialize the receive filter. */
6917                 bnx2x_set_rx_mode(bp->dev);
6918                 bp->state = BNX2X_STATE_DIAG;
6919                 break;
6920
6921         default:
6922                 break;
6923         }
6924
6925         if (!bp->port.pmf)
6926                 bnx2x__link_status_update(bp);
6927
6928         /* start the timer */
6929         mod_timer(&bp->timer, jiffies + bp->current_interval);
6930
6931
6932         return 0;
6933
6934 load_error3:
6935         bnx2x_int_disable_sync(bp, 1);
6936         if (!BP_NOMCP(bp)) {
6937                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6938                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6939         }
6940         bp->port.pmf = 0;
6941         /* Free SKBs, SGEs, TPA pool and driver internals */
6942         bnx2x_free_skbs(bp);
6943         for_each_rx_queue(bp, i)
6944                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6945 load_error2:
6946         /* Release IRQs */
6947         bnx2x_free_irq(bp);
6948 load_error1:
6949         bnx2x_napi_disable(bp);
6950         for_each_rx_queue(bp, i)
6951                 netif_napi_del(&bnx2x_fp(bp, i, napi));
6952         bnx2x_free_mem(bp);
6953
6954         /* TBD we really need to reset the chip
6955            if we want to recover from this */
6956         return rc;
6957 }
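
/*
 * The error paths above use the kernel's staged-unwind idiom: each
 * load_error label undoes only what was set up before the failing step,
 * and execution falls through the labels in reverse setup order.  A
 * minimal generic sketch of the same shape (the get_a/put_a style stubs
 * are hypothetical):
 *
 *	#include <stdio.h>
 *
 *	static int get_a(void) { return 0; }
 *	static int get_b(void) { return 0; }
 *	static int get_c(void) { return -1; }	// force the unwind path
 *	static void put_b(void) { puts("undo b"); }
 *	static void put_a(void) { puts("undo a"); }
 *
 *	static int setup_all(void)
 *	{
 *		int rc;
 *
 *		rc = get_a();
 *		if (rc)
 *			return rc;
 *		rc = get_b();
 *		if (rc)
 *			goto err_a;
 *		rc = get_c();
 *		if (rc)
 *			goto err_b;
 *		return 0;
 *
 *	err_b:
 *		put_b();	// undoes get_b() only
 *	err_a:
 *		put_a();	// then get_a()
 *		return rc;
 *	}
 *
 *	int main(void)
 *	{
 *		return setup_all() ? 1 : 0;
 *	}
 */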
6958
6959 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6960 {
6961         struct bnx2x_fastpath *fp = &bp->fp[index];
6962         int rc;
6963
6964         /* halt the connection */
6965         fp->state = BNX2X_FP_STATE_HALTING;
6966         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
6967
6968         /* Wait for completion */
6969         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6970                                &(fp->state), 1);
6971         if (rc) /* timeout */
6972                 return rc;
6973
6974         /* delete cfc entry */
6975         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6976
6977         /* Wait for completion */
6978         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6979                                &(fp->state), 1);
6980         return rc;
6981 }
6982
6983 static int bnx2x_stop_leading(struct bnx2x *bp)
6984 {
6985         u16 dsb_sp_prod_idx;
6986         /* if the other port is handling traffic,
6987            this can take a lot of time */
6988         int cnt = 500;
6989         int rc;
6990
6991         might_sleep();
6992
6993         /* Send HALT ramrod */
6994         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6995         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6996
6997         /* Wait for completion */
6998         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6999                                &(bp->fp[0].state), 1);
7000         if (rc) /* timeout */
7001                 return rc;
7002
7003         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7004
7005         /* Send PORT_DELETE ramrod */
7006         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7007
7008         /* Wait for the completion to arrive on the default status
7009            block; we are going to reset the chip anyway, so there is
7010            not much to do if this times out
7011          */
7012         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7013                 if (!cnt) {
7014                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7015                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7016                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7017 #ifdef BNX2X_STOP_ON_ERROR
7018                         bnx2x_panic();
7019 #else
7020                         rc = -EBUSY;
7021 #endif
7022                         break;
7023                 }
7024                 cnt--;
7025                 msleep(1);
7026                 rmb(); /* Refresh the dsb_sp_prod */
7027         }
7028         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7029         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7030
7031         return rc;
7032 }
7033
7034 static void bnx2x_reset_func(struct bnx2x *bp)
7035 {
7036         int port = BP_PORT(bp);
7037         int func = BP_FUNC(bp);
7038         int base, i;
7039
7040         /* Configure IGU */
7041         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7042         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7043
7044         /* Clear ILT */
7045         base = FUNC_ILT_BASE(func);
7046         for (i = base; i < base + ILT_PER_FUNC; i++)
7047                 bnx2x_ilt_wr(bp, i, 0);
7048 }
7049
7050 static void bnx2x_reset_port(struct bnx2x *bp)
7051 {
7052         int port = BP_PORT(bp);
7053         u32 val;
7054
7055         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7056
7057         /* Do not rcv packets to BRB */
7058         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7059         /* Do not direct rcv packets that are not for MCP to the BRB */
7060         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7061                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7062
7063         /* Configure AEU */
7064         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7065
7066         msleep(100);
7067         /* Check for BRB port occupancy */
7068         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7069         if (val)
7070                 DP(NETIF_MSG_IFDOWN,
7071                    "BRB1 is not empty  %d blocks are occupied\n", val);
7072
7073         /* TODO: Close Doorbell port? */
7074 }
7075
7076 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7077 {
7078         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7079            BP_FUNC(bp), reset_code);
7080
7081         switch (reset_code) {
7082         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7083                 bnx2x_reset_port(bp);
7084                 bnx2x_reset_func(bp);
7085                 bnx2x_reset_common(bp);
7086                 break;
7087
7088         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7089                 bnx2x_reset_port(bp);
7090                 bnx2x_reset_func(bp);
7091                 break;
7092
7093         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7094                 bnx2x_reset_func(bp);
7095                 break;
7096
7097         default:
7098                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7099                 break;
7100         }
7101 }
7102
7103 /* must be called with rtnl_lock */
7104 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7105 {
7106         int port = BP_PORT(bp);
7107         u32 reset_code = 0;
7108         int i, cnt, rc;
7109
7110         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7111
7112         bp->rx_mode = BNX2X_RX_MODE_NONE;
7113         bnx2x_set_storm_rx_mode(bp);
7114
7115         bnx2x_netif_stop(bp, 1);
7116
7117         del_timer_sync(&bp->timer);
7118         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7119                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7120         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7121
7122         /* Release IRQs */
7123         bnx2x_free_irq(bp);
7124
7125         /* Wait until tx fastpath tasks complete */
7126         for_each_tx_queue(bp, i) {
7127                 struct bnx2x_fastpath *fp = &bp->fp[i];
7128
7129                 cnt = 1000;
7130                 smp_rmb();
7131                 while (bnx2x_has_tx_work_unload(fp)) {
7132
7133                         bnx2x_tx_int(fp, 1000);
7134                         if (!cnt) {
7135                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7136                                           i);
7137 #ifdef BNX2X_STOP_ON_ERROR
7138                                 bnx2x_panic();
7139                                 return -EBUSY;
7140 #else
7141                                 break;
7142 #endif
7143                         }
7144                         cnt--;
7145                         msleep(1);
7146                         smp_rmb();
7147                 }
7148         }
7149         /* Give HW time to discard old tx messages */
7150         msleep(1);
7151
7152         if (CHIP_IS_E1(bp)) {
7153                 struct mac_configuration_cmd *config =
7154                                                 bnx2x_sp(bp, mcast_config);
7155
7156                 bnx2x_set_mac_addr_e1(bp, 0);
7157
7158                 for (i = 0; i < config->hdr.length; i++)
7159                         CAM_INVALIDATE(config->config_table[i]);
7160
7161                 config->hdr.length = i;
7162                 if (CHIP_REV_IS_SLOW(bp))
7163                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7164                 else
7165                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7166                 config->hdr.client_id = BP_CL_ID(bp);
7167                 config->hdr.reserved1 = 0;
7168
7169                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7170                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7171                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7172
7173         } else { /* E1H */
7174                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7175
7176                 bnx2x_set_mac_addr_e1h(bp, 0);
7177
7178                 for (i = 0; i < MC_HASH_SIZE; i++)
7179                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7180         }
7181
7182         if (unload_mode == UNLOAD_NORMAL)
7183                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7184
7185         else if (bp->flags & NO_WOL_FLAG) {
7186                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7187                 if (CHIP_IS_E1H(bp))
7188                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7189
7190         } else if (bp->wol) {
7191                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7192                 u8 *mac_addr = bp->dev->dev_addr;
7193                 u32 val;
7194                 /* The MAC address is written to entries 1-4 to
7195                    preserve entry 0, which is used by the PMF */
7196                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7197
7198                 val = (mac_addr[0] << 8) | mac_addr[1];
7199                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7200
7201                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7202                       (mac_addr[4] << 8) | mac_addr[5];
7203                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7204
7205                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7206
7207         } else
7208                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7209
7210         /* Close multi and leading connections;
7211            completions for the ramrods are collected synchronously */
7212         for_each_nondefault_queue(bp, i)
7213                 if (bnx2x_stop_multi(bp, i))
7214                         goto unload_error;
7215
7216         rc = bnx2x_stop_leading(bp);
7217         if (rc) {
7218                 BNX2X_ERR("Stop leading failed!\n");
7219 #ifdef BNX2X_STOP_ON_ERROR
7220                 return -EBUSY;
7221 #else
7222                 goto unload_error;
7223 #endif
7224         }
7225
7226 unload_error:
7227         if (!BP_NOMCP(bp))
7228                 reset_code = bnx2x_fw_command(bp, reset_code);
7229         else {
7230                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
7231                    load_count[0], load_count[1], load_count[2]);
7232                 load_count[0]--;
7233                 load_count[1 + port]--;
7234                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
7235                    load_count[0], load_count[1], load_count[2]);
7236                 if (load_count[0] == 0)
7237                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7238                 else if (load_count[1 + port] == 0)
7239                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7240                 else
7241                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7242         }
7243
7244         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7245             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7246                 bnx2x__link_reset(bp);
7247
7248         /* Reset the chip */
7249         bnx2x_reset_chip(bp, reset_code);
7250
7251         /* Report UNLOAD_DONE to MCP */
7252         if (!BP_NOMCP(bp))
7253                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7254         bp->port.pmf = 0;
7255
7256         /* Free SKBs, SGEs, TPA pool and driver internals */
7257         bnx2x_free_skbs(bp);
7258         for_each_rx_queue(bp, i)
7259                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7260         for_each_rx_queue(bp, i)
7261                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7262         bnx2x_free_mem(bp);
7263
7264         bp->state = BNX2X_STATE_CLOSED;
7265
7266         netif_carrier_off(bp->dev);
7267
7268         return 0;
7269 }
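
/*
 * A minimal sketch (not part of the driver) of the EMAC MAC-match packing
 * used in the WoL branch above: the first register takes the two most
 * significant address bytes in its low half, the second takes the
 * remaining four bytes.  The address is illustrative and the printf()s
 * stand in for the two EMAC_WR() calls.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
 *		uint32_t hi = (mac[0] << 8) | mac[1];
 *		uint32_t lo = (mac[2] << 24) | (mac[3] << 16) |
 *			      (mac[4] << 8) | mac[5];
 *
 *		printf("match+0: 0x%08x\n", hi);	// 0x00000010
 *		printf("match+4: 0x%08x\n", lo);	// 0x18aabbcc
 *		return 0;
 *	}
 */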
7270
7271 static void bnx2x_reset_task(struct work_struct *work)
7272 {
7273         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7274
7275 #ifdef BNX2X_STOP_ON_ERROR
7276         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7277                   " so reset not done to allow debug dump,\n"
7278          KERN_ERR " you will need to reboot when done\n");
7279         return;
7280 #endif
7281
7282         rtnl_lock();
7283
7284         if (!netif_running(bp->dev))
7285                 goto reset_task_exit;
7286
7287         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7288         bnx2x_nic_load(bp, LOAD_NORMAL);
7289
7290 reset_task_exit:
7291         rtnl_unlock();
7292 }
7293
7294 /* end of nic load/unload */
7295
7296 /* ethtool_ops */
7297
7298 /*
7299  * Init service functions
7300  */
7301
7302 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7303 {
7304         switch (func) {
7305         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7306         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7307         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7308         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7309         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7310         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7311         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7312         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7313         default:
7314                 BNX2X_ERR("Unsupported function index: %d\n", func);
7315                 return (u32)(-1);
7316         }
7317 }
7318
7319 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7320 {
7321         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7322
7323         /* Flush all outstanding writes */
7324         mmiowb();
7325
7326         /* Pretend to be function 0 */
7327         REG_WR(bp, reg, 0);
7328         /* Flush the GRC transaction (in the chip) */
7329         new_val = REG_RD(bp, reg);
7330         if (new_val != 0) {
7331                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7332                           new_val);
7333                 BUG();
7334         }
7335
7336         /* From now on we are in the "like-E1" mode */
7337         bnx2x_int_disable(bp);
7338
7339         /* Flush all outstanding writes */
7340         mmiowb();
7341
7342         /* Restore the original function settings */
7343         REG_WR(bp, reg, orig_func);
7344         new_val = REG_RD(bp, reg);
7345         if (new_val != orig_func) {
7346                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7347                           orig_func, new_val);
7348                 BUG();
7349         }
7350 }
7351
7352 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7353 {
7354         if (CHIP_IS_E1H(bp))
7355                 bnx2x_undi_int_disable_e1h(bp, func);
7356         else
7357                 bnx2x_int_disable(bp);
7358 }
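
/*
 * The pretend sequence above depends on posted writes being flushed: each
 * REG_WR() is followed by a REG_RD() of the same register, and the value
 * read back is verified before continuing.  A minimal sketch of that
 * write/read-back contract against a plain memory cell (the volatile
 * pointer stands in for a device register; real MMIO needs the proper
 * accessors):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static void write_and_verify(volatile uint32_t *reg, uint32_t val)
 *	{
 *		*reg = val;		// posted write
 *		assert(*reg == val);	// read back: flush and verify
 *	}
 *
 *	int main(void)
 *	{
 *		volatile uint32_t cell = 0;
 *
 *		write_and_verify(&cell, 7);
 *		return 0;
 *	}
 */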
7359
7360 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7361 {
7362         u32 val;
7363
7364         /* Check if there is any driver already loaded */
7365         val = REG_RD(bp, MISC_REG_UNPREPARED);
7366         if (val == 0x1) {
7367                 /* Check if it is the UNDI driver; the UNDI driver
7368                  * initializes the CID offset for the normal doorbell to 0x7
7369                  */
7370                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7371                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7372                 if (val == 0x7) {
7373                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7374                         /* save our func */
7375                         int func = BP_FUNC(bp);
7376                         u32 swap_en;
7377                         u32 swap_val;
7378
7379                         /* clear the UNDI indication */
7380                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7381
7382                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7383
7384                         /* try to unload UNDI on port 0 */
7385                         bp->func = 0;
7386                         bp->fw_seq =
7387                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7388                                 DRV_MSG_SEQ_NUMBER_MASK);
7389                         reset_code = bnx2x_fw_command(bp, reset_code);
7390
7391                         /* if UNDI is loaded on the other port */
7392                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7393
7394                                 /* send "DONE" for previous unload */
7395                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7396
7397                                 /* unload UNDI on port 1 */
7398                                 bp->func = 1;
7399                                 bp->fw_seq =
7400                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7401                                         DRV_MSG_SEQ_NUMBER_MASK);
7402                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7403
7404                                 bnx2x_fw_command(bp, reset_code);
7405                         }
7406
7407                         /* now it's safe to release the lock */
7408                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7409
7410                         bnx2x_undi_int_disable(bp, func);
7411
7412                         /* close input traffic and wait for it to drain */
7413                         /* Do not rcv packets to BRB */
7414                         REG_WR(bp,
7415                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7416                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7417                         /* Do not direct rcv packets that are not for MCP to
7418                          * the BRB */
7419                         REG_WR(bp,
7420                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7421                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7422                         /* clear AEU */
7423                         REG_WR(bp,
7424                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7425                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7426                         msleep(10);
7427
7428                         /* save NIG port swap info */
7429                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7430                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7431                         /* reset device */
7432                         REG_WR(bp,
7433                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7434                                0xd3ffffff);
7435                         REG_WR(bp,
7436                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7437                                0x1403);
7438                         /* take the NIG out of reset and restore swap values */
7439                         REG_WR(bp,
7440                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7441                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7442                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7443                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7444
7445                         /* send unload done to the MCP */
7446                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7447
7448                         /* restore our func and fw_seq */
7449                         bp->func = func;
7450                         bp->fw_seq =
7451                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7452                                 DRV_MSG_SEQ_NUMBER_MASK);
7453
7454                 } else
7455                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7456         }
7457 }
7458
7459 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7460 {
7461         u32 val, val2, val3, val4, id;
7462         u16 pmc;
7463
7464         /* Get the chip revision id and number. */
7465         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7466         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7467         id = ((val & 0xffff) << 16);
7468         val = REG_RD(bp, MISC_REG_CHIP_REV);
7469         id |= ((val & 0xf) << 12);
7470         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7471         id |= ((val & 0xff) << 4);
7472         val = REG_RD(bp, MISC_REG_BOND_ID);
7473         id |= (val & 0xf);
7474         bp->common.chip_id = id;
7475         bp->link_params.chip_id = bp->common.chip_id;
7476         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7477
7478         val = (REG_RD(bp, 0x2874) & 0x55);
7479         if ((bp->common.chip_id & 0x1) ||
7480             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7481                 bp->flags |= ONE_PORT_FLAG;
7482                 BNX2X_DEV_INFO("single port device\n");
7483         }
7484
7485         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7486         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7487                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7488         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7489                        bp->common.flash_size, bp->common.flash_size);
7490
7491         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7492         bp->link_params.shmem_base = bp->common.shmem_base;
7493         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7494
7495         if (!bp->common.shmem_base ||
7496             (bp->common.shmem_base < 0xA0000) ||
7497             (bp->common.shmem_base >= 0xC0000)) {
7498                 BNX2X_DEV_INFO("MCP not active\n");
7499                 bp->flags |= NO_MCP_FLAG;
7500                 return;
7501         }
7502
7503         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7504         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7505                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7506                 BNX2X_ERR("BAD MCP validity signature\n");
7507
7508         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7509         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7510
7511         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7512                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7513                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7514
7515         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7516         bp->common.bc_ver = val;
7517         BNX2X_DEV_INFO("bc_ver %X\n", val);
7518         if (val < BNX2X_BC_VER) {
7519                 /* for now only warn
7520                  * later we might need to enforce this */
7521                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7522                           " please upgrade BC\n", BNX2X_BC_VER, val);
7523         }
7524
7525         if (BP_E1HVN(bp) == 0) {
7526                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7527                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7528         } else {
7529                 /* no WOL capability for E1HVN != 0 */
7530                 bp->flags |= NO_WOL_FLAG;
7531         }
7532         BNX2X_DEV_INFO("%sWoL capable\n",
7533                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7534
7535         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7536         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7537         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7538         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7539
7540         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7541                val, val2, val3, val4);
7542 }
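
/*
 * A minimal sketch (not part of the driver) of the chip-id assembly done
 * in bnx2x_get_common_hwinfo() above, using the field layout from its
 * comment (num:16-31, rev:12-15, metal:4-11, bond_id:0-3).  The register
 * values are illustrative stand-ins for the four REG_RD()s.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		uint32_t num = 0x164e, rev = 0x1, metal = 0x00, bond = 0x0;
 *		uint32_t id;
 *
 *		id = (num & 0xffff) << 16;
 *		id |= (rev & 0xf) << 12;
 *		id |= (metal & 0xff) << 4;
 *		id |= (bond & 0xf);
 *
 *		printf("chip ID is 0x%x\n", id);	// 0x164e1000
 *		return 0;
 *	}
 */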
7543
7544 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7545                                                     u32 switch_cfg)
7546 {
7547         int port = BP_PORT(bp);
7548         u32 ext_phy_type;
7549
7550         switch (switch_cfg) {
7551         case SWITCH_CFG_1G:
7552                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7553
7554                 ext_phy_type =
7555                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7556                 switch (ext_phy_type) {
7557                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7558                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7559                                        ext_phy_type);
7560
7561                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7562                                                SUPPORTED_10baseT_Full |
7563                                                SUPPORTED_100baseT_Half |
7564                                                SUPPORTED_100baseT_Full |
7565                                                SUPPORTED_1000baseT_Full |
7566                                                SUPPORTED_2500baseX_Full |
7567                                                SUPPORTED_TP |
7568                                                SUPPORTED_FIBRE |
7569                                                SUPPORTED_Autoneg |
7570                                                SUPPORTED_Pause |
7571                                                SUPPORTED_Asym_Pause);
7572                         break;
7573
7574                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7575                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7576                                        ext_phy_type);
7577
7578                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7579                                                SUPPORTED_10baseT_Full |
7580                                                SUPPORTED_100baseT_Half |
7581                                                SUPPORTED_100baseT_Full |
7582                                                SUPPORTED_1000baseT_Full |
7583                                                SUPPORTED_TP |
7584                                                SUPPORTED_FIBRE |
7585                                                SUPPORTED_Autoneg |
7586                                                SUPPORTED_Pause |
7587                                                SUPPORTED_Asym_Pause);
7588                         break;
7589
7590                 default:
7591                         BNX2X_ERR("NVRAM config error. "
7592                                   "BAD SerDes ext_phy_config 0x%x\n",
7593                                   bp->link_params.ext_phy_config);
7594                         return;
7595                 }
7596
7597                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7598                                            port*0x10);
7599                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7600                 break;
7601
7602         case SWITCH_CFG_10G:
7603                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7604
7605                 ext_phy_type =
7606                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7607                 switch (ext_phy_type) {
7608                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7609                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7610                                        ext_phy_type);
7611
7612                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7613                                                SUPPORTED_10baseT_Full |
7614                                                SUPPORTED_100baseT_Half |
7615                                                SUPPORTED_100baseT_Full |
7616                                                SUPPORTED_1000baseT_Full |
7617                                                SUPPORTED_2500baseX_Full |
7618                                                SUPPORTED_10000baseT_Full |
7619                                                SUPPORTED_TP |
7620                                                SUPPORTED_FIBRE |
7621                                                SUPPORTED_Autoneg |
7622                                                SUPPORTED_Pause |
7623                                                SUPPORTED_Asym_Pause);
7624                         break;
7625
7626                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7627                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7628                                        ext_phy_type);
7629
7630                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7631                                                SUPPORTED_FIBRE |
7632                                                SUPPORTED_Pause |
7633                                                SUPPORTED_Asym_Pause);
7634                         break;
7635
7636                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7637                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7638                                        ext_phy_type);
7639
7640                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7641                                                SUPPORTED_1000baseT_Full |
7642                                                SUPPORTED_FIBRE |
7643                                                SUPPORTED_Pause |
7644                                                SUPPORTED_Asym_Pause);
7645                         break;
7646
7647                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7648                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7649                                        ext_phy_type);
7650
7651                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7652                                                SUPPORTED_1000baseT_Full |
7653                                                SUPPORTED_FIBRE |
7654                                                SUPPORTED_Autoneg |
7655                                                SUPPORTED_Pause |
7656                                                SUPPORTED_Asym_Pause);
7657                         break;
7658
7659                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7660                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7661                                        ext_phy_type);
7662
7663                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7664                                                SUPPORTED_2500baseX_Full |
7665                                                SUPPORTED_1000baseT_Full |
7666                                                SUPPORTED_FIBRE |
7667                                                SUPPORTED_Autoneg |
7668                                                SUPPORTED_Pause |
7669                                                SUPPORTED_Asym_Pause);
7670                         break;
7671
7672                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7673                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7674                                        ext_phy_type);
7675
7676                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7677                                                SUPPORTED_TP |
7678                                                SUPPORTED_Autoneg |
7679                                                SUPPORTED_Pause |
7680                                                SUPPORTED_Asym_Pause);
7681                         break;
7682
7683                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7684                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7685                                   bp->link_params.ext_phy_config);
7686                         break;
7687
7688                 default:
7689                         BNX2X_ERR("NVRAM config error. "
7690                                   "BAD XGXS ext_phy_config 0x%x\n",
7691                                   bp->link_params.ext_phy_config);
7692                         return;
7693                 }
7694
7695                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7696                                            port*0x18);
7697                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7698
7699                 break;
7700
7701         default:
7702                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7703                           bp->port.link_config);
7704                 return;
7705         }
7706         bp->link_params.phy_addr = bp->port.phy_addr;
7707
7708         /* mask what we support according to speed_cap_mask */
7709         if (!(bp->link_params.speed_cap_mask &
7710                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7711                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7712
7713         if (!(bp->link_params.speed_cap_mask &
7714                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7715                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7716
7717         if (!(bp->link_params.speed_cap_mask &
7718                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7719                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7720
7721         if (!(bp->link_params.speed_cap_mask &
7722                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7723                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7724
7725         if (!(bp->link_params.speed_cap_mask &
7726                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7727                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7728                                         SUPPORTED_1000baseT_Full);
7729
7730         if (!(bp->link_params.speed_cap_mask &
7731                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7732                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7733
7734         if (!(bp->link_params.speed_cap_mask &
7735                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7736                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7737
7738         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7739 }
7740
7741 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7742 {
7743         bp->link_params.req_duplex = DUPLEX_FULL;
7744
7745         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7746         case PORT_FEATURE_LINK_SPEED_AUTO:
7747                 if (bp->port.supported & SUPPORTED_Autoneg) {
7748                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7749                         bp->port.advertising = bp->port.supported;
7750                 } else {
7751                         u32 ext_phy_type =
7752                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7753
7754                         if ((ext_phy_type ==
7755                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7756                             (ext_phy_type ==
7757                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7758                                 /* force 10G, no AN */
7759                                 bp->link_params.req_line_speed = SPEED_10000;
7760                                 bp->port.advertising =
7761                                                 (ADVERTISED_10000baseT_Full |
7762                                                  ADVERTISED_FIBRE);
7763                                 break;
7764                         }
7765                         BNX2X_ERR("NVRAM config error. "
7766                                   "Invalid link_config 0x%x"
7767                                   "  Autoneg not supported\n",
7768                                   bp->port.link_config);
7769                         return;
7770                 }
7771                 break;
7772
7773         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7774                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7775                         bp->link_params.req_line_speed = SPEED_10;
7776                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7777                                                 ADVERTISED_TP);
7778                 } else {
7779                         BNX2X_ERR("NVRAM config error. "
7780                                   "Invalid link_config 0x%x"
7781                                   "  speed_cap_mask 0x%x\n",
7782                                   bp->port.link_config,
7783                                   bp->link_params.speed_cap_mask);
7784                         return;
7785                 }
7786                 break;
7787
7788         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7789                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7790                         bp->link_params.req_line_speed = SPEED_10;
7791                         bp->link_params.req_duplex = DUPLEX_HALF;
7792                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7793                                                 ADVERTISED_TP);
7794                 } else {
7795                         BNX2X_ERR("NVRAM config error. "
7796                                   "Invalid link_config 0x%x"
7797                                   "  speed_cap_mask 0x%x\n",
7798                                   bp->port.link_config,
7799                                   bp->link_params.speed_cap_mask);
7800                         return;
7801                 }
7802                 break;
7803
7804         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7805                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7806                         bp->link_params.req_line_speed = SPEED_100;
7807                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7808                                                 ADVERTISED_TP);
7809                 } else {
7810                         BNX2X_ERR("NVRAM config error. "
7811                                   "Invalid link_config 0x%x"
7812                                   "  speed_cap_mask 0x%x\n",
7813                                   bp->port.link_config,
7814                                   bp->link_params.speed_cap_mask);
7815                         return;
7816                 }
7817                 break;
7818
7819         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7820                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7821                         bp->link_params.req_line_speed = SPEED_100;
7822                         bp->link_params.req_duplex = DUPLEX_HALF;
7823                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7824                                                 ADVERTISED_TP);
7825                 } else {
7826                         BNX2X_ERR("NVRAM config error. "
7827                                   "Invalid link_config 0x%x"
7828                                   "  speed_cap_mask 0x%x\n",
7829                                   bp->port.link_config,
7830                                   bp->link_params.speed_cap_mask);
7831                         return;
7832                 }
7833                 break;
7834
7835         case PORT_FEATURE_LINK_SPEED_1G:
7836                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7837                         bp->link_params.req_line_speed = SPEED_1000;
7838                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7839                                                 ADVERTISED_TP);
7840                 } else {
7841                         BNX2X_ERR("NVRAM config error. "
7842                                   "Invalid link_config 0x%x"
7843                                   "  speed_cap_mask 0x%x\n",
7844                                   bp->port.link_config,
7845                                   bp->link_params.speed_cap_mask);
7846                         return;
7847                 }
7848                 break;
7849
7850         case PORT_FEATURE_LINK_SPEED_2_5G:
7851                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7852                         bp->link_params.req_line_speed = SPEED_2500;
7853                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7854                                                 ADVERTISED_TP);
7855                 } else {
7856                         BNX2X_ERR("NVRAM config error. "
7857                                   "Invalid link_config 0x%x"
7858                                   "  speed_cap_mask 0x%x\n",
7859                                   bp->port.link_config,
7860                                   bp->link_params.speed_cap_mask);
7861                         return;
7862                 }
7863                 break;
7864
7865         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7866         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7867         case PORT_FEATURE_LINK_SPEED_10G_KR:
7868                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7869                         bp->link_params.req_line_speed = SPEED_10000;
7870                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7871                                                 ADVERTISED_FIBRE);
7872                 } else {
7873                         BNX2X_ERR("NVRAM config error. "
7874                                   "Invalid link_config 0x%x"
7875                                   "  speed_cap_mask 0x%x\n",
7876                                   bp->port.link_config,
7877                                   bp->link_params.speed_cap_mask);
7878                         return;
7879                 }
7880                 break;
7881
7882         default:
7883                 BNX2X_ERR("NVRAM config error. "
7884                           "BAD link speed link_config 0x%x\n",
7885                           bp->port.link_config);
7886                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7887                 bp->port.advertising = bp->port.supported;
7888                 break;
7889         }
7890
7891         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7892                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7893         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7894             !(bp->port.supported & SUPPORTED_Autoneg))
7895                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7896
7897         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7898                        "  advertising 0x%x\n",
7899                        bp->link_params.req_line_speed,
7900                        bp->link_params.req_duplex,
7901                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7902 }
7903
7904 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7905 {
7906         int port = BP_PORT(bp);
7907         u32 val, val2;
7908
7909         bp->link_params.bp = bp;
7910         bp->link_params.port = port;
7911
7912         bp->link_params.serdes_config =
7913                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7914         bp->link_params.lane_config =
7915                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7916         bp->link_params.ext_phy_config =
7917                 SHMEM_RD(bp,
7918                          dev_info.port_hw_config[port].external_phy_config);
7919         bp->link_params.speed_cap_mask =
7920                 SHMEM_RD(bp,
7921                          dev_info.port_hw_config[port].speed_capability_mask);
7922
7923         bp->port.link_config =
7924                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7925
7926         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7927              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7928                        "  link_config 0x%08x\n",
7929                        bp->link_params.serdes_config,
7930                        bp->link_params.lane_config,
7931                        bp->link_params.ext_phy_config,
7932                        bp->link_params.speed_cap_mask, bp->port.link_config);
7933
7934         bp->link_params.switch_cfg = (bp->port.link_config &
7935                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7936         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7937
7938         bnx2x_link_settings_requested(bp);
7939
7940         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7941         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7942         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7943         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7944         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7945         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7946         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7947         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7948         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7949         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7950 }
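
/*
 * A minimal sketch (not part of the driver) of the address unpacking done
 * in bnx2x_get_port_hwinfo() above: mac_upper carries the two most
 * significant bytes in its low 16 bits and mac_lower the remaining four,
 * most significant byte first.  The word values are illustrative.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		uint32_t upper = 0x0010, lower = 0x18aabbcc;
 *		uint8_t mac[6];
 *		int i;
 *
 *		mac[0] = (upper >> 8) & 0xff;
 *		mac[1] = upper & 0xff;
 *		for (i = 0; i < 4; i++)
 *			mac[2 + i] = (lower >> (24 - 8 * i)) & 0xff;
 *
 *		for (i = 0; i < 6; i++)
 *			printf("%02x%c", mac[i], i < 5 ? ':' : '\n');
 *		return 0;	// prints 00:10:18:aa:bb:cc
 *	}
 */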
7951
7952 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7953 {
7954         int func = BP_FUNC(bp);
7955         u32 val, val2;
7956         int rc = 0;
7957
7958         bnx2x_get_common_hwinfo(bp);
7959
7960         bp->e1hov = 0;
7961         bp->e1hmf = 0;
7962         if (CHIP_IS_E1H(bp)) {
7963                 bp->mf_config =
7964                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7965
7966                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7967                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7968                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7969
7970                         bp->e1hov = val;
7971                         bp->e1hmf = 1;
7972                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7973                                        "(0x%04x)\n",
7974                                        func, bp->e1hov, bp->e1hov);
7975                 } else {
7976                         BNX2X_DEV_INFO("Single function mode\n");
7977                         if (BP_E1HVN(bp)) {
7978                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7979                                           "  aborting\n", func);
7980                                 rc = -EPERM;
7981                         }
7982                 }
7983         }
7984
7985         if (!BP_NOMCP(bp)) {
7986                 bnx2x_get_port_hwinfo(bp);
7987
7988                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7989                               DRV_MSG_SEQ_NUMBER_MASK);
7990                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7991         }
7992
7993         if (IS_E1HMF(bp)) {
7994                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7995                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7996                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7997                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7998                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7999                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8000                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8001                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8002                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8003                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8004                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8005                                ETH_ALEN);
8006                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8007                                ETH_ALEN);
8008                 }
8009
8010                 return rc;
8011         }
8012
8013         if (BP_NOMCP(bp)) {
8014                 /* only supposed to happen on emulation/FPGA */
8015                 BNX2X_ERR("warning: random MAC workaround active\n");
8016                 random_ether_addr(bp->dev->dev_addr);
8017                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8018         }
8019
8020         return rc;
8021 }
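/* Precedence implemented above: in E1H multi-function mode a per-function
 * MAC from mf_cfg (taken only when both halves differ from the
 * FUNC_MF_CFG_*MAC_DEFAULT sentinels) overrides the per-port address read
 * by bnx2x_get_port_hwinfo(); without an MCP (emulation/FPGA only) the
 * driver falls back to a random locally administered address.
 */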
8022
8023 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8024 {
8025         int func = BP_FUNC(bp);
8026         int timer_interval;
8027         int rc;
8028
8029         /* Disable interrupt handling until HW is initialized */
8030         atomic_set(&bp->intr_sem, 1);
8031
8032         mutex_init(&bp->port.phy_mutex);
8033
8034         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8035         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8036
8037         rc = bnx2x_get_hwinfo(bp);
8038
8039         /* need to reset chip if undi was active */
8040         if (!BP_NOMCP(bp))
8041                 bnx2x_undi_unload(bp);
8042
8043         if (CHIP_REV_IS_FPGA(bp))
8044                 printk(KERN_ERR PFX "FPGA detected\n");
8045
8046         if (BP_NOMCP(bp) && (func == 0))
8047                 printk(KERN_ERR PFX
8048                        "MCP disabled, must load devices in order!\n");
8049
8050         /* Set multi queue mode */
8051         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8052             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8053                 printk(KERN_ERR PFX
8054                       "Multi disabled since the requested int_mode is not MSI-X\n");
8055                 multi_mode = ETH_RSS_MODE_DISABLED;
8056         }
8057         bp->multi_mode = multi_mode;
8058
8059
8060         /* Set TPA flags */
8061         if (disable_tpa) {
8062                 bp->flags &= ~TPA_ENABLE_FLAG;
8063                 bp->dev->features &= ~NETIF_F_LRO;
8064         } else {
8065                 bp->flags |= TPA_ENABLE_FLAG;
8066                 bp->dev->features |= NETIF_F_LRO;
8067         }
8068
8069
8070         bp->tx_ring_size = MAX_TX_AVAIL;
8071         bp->rx_ring_size = MAX_RX_AVAIL;
8072
8073         bp->rx_csum = 1;
8074
8075         bp->tx_ticks = 50;
8076         bp->rx_ticks = 25;
8077
8078         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8079         bp->current_interval = (poll ? poll : timer_interval);
8080
8081         init_timer(&bp->timer);
8082         bp->timer.expires = jiffies + bp->current_interval;
8083         bp->timer.data = (unsigned long) bp;
8084         bp->timer.function = bnx2x_timer;
8085
8086         return rc;
8087 }
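/* Note on the interval selection above: emulation/FPGA ("slow") chips get
 * a 5 second timer, real silicon 1 second, and a non-zero "poll" module
 * parameter overrides both (its value is used directly as a jiffies
 * interval).  init_timer() only initializes the timer; it is armed
 * elsewhere, when the NIC is loaded.
 */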
8088
8089 /*
8090  * ethtool service functions
8091  */
8092
8093 /* All ethtool functions called with rtnl_lock */
8094
8095 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8096 {
8097         struct bnx2x *bp = netdev_priv(dev);
8098
8099         cmd->supported = bp->port.supported;
8100         cmd->advertising = bp->port.advertising;
8101
8102         if (netif_carrier_ok(dev)) {
8103                 cmd->speed = bp->link_vars.line_speed;
8104                 cmd->duplex = bp->link_vars.duplex;
8105         } else {
8106                 cmd->speed = bp->link_params.req_line_speed;
8107                 cmd->duplex = bp->link_params.req_duplex;
8108         }
8109         if (IS_E1HMF(bp)) {
8110                 u16 vn_max_rate;
8111
8112                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8113                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8114                 if (vn_max_rate < cmd->speed)
8115                         cmd->speed = vn_max_rate;
8116         }
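        /* The MAX_BW field of mf_config is in units of 100 Mbps, hence the
         * multiplication above; e.g. a (hypothetical) field value of 25
         * caps the reported speed at 2500.  ethtool speeds are also in
         * Mbps, so the units line up directly.
         */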
8117
8118         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8119                 u32 ext_phy_type =
8120                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8121
8122                 switch (ext_phy_type) {
8123                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8124                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8125                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8126                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8127                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8128                         cmd->port = PORT_FIBRE;
8129                         break;
8130
8131                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8132                         cmd->port = PORT_TP;
8133                         break;
8134
8135                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8136                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8137                                   bp->link_params.ext_phy_config);
8138                         break;
8139
8140                 default:
8141                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8142                            bp->link_params.ext_phy_config);
8143                         break;
8144                 }
8145         } else
8146                 cmd->port = PORT_TP;
8147
8148         cmd->phy_address = bp->port.phy_addr;
8149         cmd->transceiver = XCVR_INTERNAL;
8150
8151         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8152                 cmd->autoneg = AUTONEG_ENABLE;
8153         else
8154                 cmd->autoneg = AUTONEG_DISABLE;
8155
8156         cmd->maxtxpkt = 0;
8157         cmd->maxrxpkt = 0;
8158
8159         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8160            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8161            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8162            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8163            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8164            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8165            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8166
8167         return 0;
8168 }
8169
8170 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8171 {
8172         struct bnx2x *bp = netdev_priv(dev);
8173         u32 advertising;
8174
8175         if (IS_E1HMF(bp))
8176                 return 0;
8177
8178         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8179            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8180            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8181            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8182            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8183            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8184            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8185
8186         if (cmd->autoneg == AUTONEG_ENABLE) {
8187                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8188                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8189                         return -EINVAL;
8190                 }
8191
8192                 /* advertise the requested speed and duplex if supported */
8193                 cmd->advertising &= bp->port.supported;
8194
8195                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8196                 bp->link_params.req_duplex = DUPLEX_FULL;
8197                 bp->port.advertising |= (ADVERTISED_Autoneg |
8198                                          cmd->advertising);
8199
8200         } else { /* forced speed */
8201                 /* advertise the requested speed and duplex if supported */
8202                 switch (cmd->speed) {
8203                 case SPEED_10:
8204                         if (cmd->duplex == DUPLEX_FULL) {
8205                                 if (!(bp->port.supported &
8206                                       SUPPORTED_10baseT_Full)) {
8207                                         DP(NETIF_MSG_LINK,
8208                                            "10M full not supported\n");
8209                                         return -EINVAL;
8210                                 }
8211
8212                                 advertising = (ADVERTISED_10baseT_Full |
8213                                                ADVERTISED_TP);
8214                         } else {
8215                                 if (!(bp->port.supported &
8216                                       SUPPORTED_10baseT_Half)) {
8217                                         DP(NETIF_MSG_LINK,
8218                                            "10M half not supported\n");
8219                                         return -EINVAL;
8220                                 }
8221
8222                                 advertising = (ADVERTISED_10baseT_Half |
8223                                                ADVERTISED_TP);
8224                         }
8225                         break;
8226
8227                 case SPEED_100:
8228                         if (cmd->duplex == DUPLEX_FULL) {
8229                                 if (!(bp->port.supported &
8230                                                 SUPPORTED_100baseT_Full)) {
8231                                         DP(NETIF_MSG_LINK,
8232                                            "100M full not supported\n");
8233                                         return -EINVAL;
8234                                 }
8235
8236                                 advertising = (ADVERTISED_100baseT_Full |
8237                                                ADVERTISED_TP);
8238                         } else {
8239                                 if (!(bp->port.supported &
8240                                                 SUPPORTED_100baseT_Half)) {
8241                                         DP(NETIF_MSG_LINK,
8242                                            "100M half not supported\n");
8243                                         return -EINVAL;
8244                                 }
8245
8246                                 advertising = (ADVERTISED_100baseT_Half |
8247                                                ADVERTISED_TP);
8248                         }
8249                         break;
8250
8251                 case SPEED_1000:
8252                         if (cmd->duplex != DUPLEX_FULL) {
8253                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8254                                 return -EINVAL;
8255                         }
8256
8257                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8258                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8259                                 return -EINVAL;
8260                         }
8261
8262                         advertising = (ADVERTISED_1000baseT_Full |
8263                                        ADVERTISED_TP);
8264                         break;
8265
8266                 case SPEED_2500:
8267                         if (cmd->duplex != DUPLEX_FULL) {
8268                                 DP(NETIF_MSG_LINK,
8269                                    "2.5G half not supported\n");
8270                                 return -EINVAL;
8271                         }
8272
8273                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8274                                 DP(NETIF_MSG_LINK,
8275                                    "2.5G full not supported\n");
8276                                 return -EINVAL;
8277                         }
8278
8279                         advertising = (ADVERTISED_2500baseX_Full |
8280                                        ADVERTISED_TP);
8281                         break;
8282
8283                 case SPEED_10000:
8284                         if (cmd->duplex != DUPLEX_FULL) {
8285                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8286                                 return -EINVAL;
8287                         }
8288
8289                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8290                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8291                                 return -EINVAL;
8292                         }
8293
8294                         advertising = (ADVERTISED_10000baseT_Full |
8295                                        ADVERTISED_FIBRE);
8296                         break;
8297
8298                 default:
8299                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8300                         return -EINVAL;
8301                 }
8302
8303                 bp->link_params.req_line_speed = cmd->speed;
8304                 bp->link_params.req_duplex = cmd->duplex;
8305                 bp->port.advertising = advertising;
8306         }
8307
8308         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8309            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8310            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8311            bp->port.advertising);
8312
8313         if (netif_running(dev)) {
8314                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8315                 bnx2x_link_set(bp);
8316         }
8317
8318         return 0;
8319 }
8320
8321 #define PHY_FW_VER_LEN                  10
8322
8323 static void bnx2x_get_drvinfo(struct net_device *dev,
8324                               struct ethtool_drvinfo *info)
8325 {
8326         struct bnx2x *bp = netdev_priv(dev);
8327         u8 phy_fw_ver[PHY_FW_VER_LEN];
8328
8329         strcpy(info->driver, DRV_MODULE_NAME);
8330         strcpy(info->version, DRV_MODULE_VERSION);
8331
8332         phy_fw_ver[0] = '\0';
8333         if (bp->port.pmf) {
8334                 bnx2x_acquire_phy_lock(bp);
8335                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8336                                              (bp->state != BNX2X_STATE_CLOSED),
8337                                              phy_fw_ver, PHY_FW_VER_LEN);
8338                 bnx2x_release_phy_lock(bp);
8339         }
8340
8341         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8342                  (bp->common.bc_ver & 0xff0000) >> 16,
8343                  (bp->common.bc_ver & 0xff00) >> 8,
8344                  (bp->common.bc_ver & 0xff),
8345                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8346         strcpy(info->bus_info, pci_name(bp->pdev));
8347         info->n_stats = BNX2X_NUM_STATS;
8348         info->testinfo_len = BNX2X_NUM_TESTS;
8349         info->eedump_len = bp->common.flash_size;
8350         info->regdump_len = 0;
8351 }
8352
8353 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8354 {
8355         struct bnx2x *bp = netdev_priv(dev);
8356
8357         if (bp->flags & NO_WOL_FLAG) {
8358                 wol->supported = 0;
8359                 wol->wolopts = 0;
8360         } else {
8361                 wol->supported = WAKE_MAGIC;
8362                 if (bp->wol)
8363                         wol->wolopts = WAKE_MAGIC;
8364                 else
8365                         wol->wolopts = 0;
8366         }
8367         memset(&wol->sopass, 0, sizeof(wol->sopass));
8368 }
8369
8370 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8371 {
8372         struct bnx2x *bp = netdev_priv(dev);
8373
8374         if (wol->wolopts & ~WAKE_MAGIC)
8375                 return -EINVAL;
8376
8377         if (wol->wolopts & WAKE_MAGIC) {
8378                 if (bp->flags & NO_WOL_FLAG)
8379                         return -EINVAL;
8380
8381                 bp->wol = 1;
8382         } else
8383                 bp->wol = 0;
8384
8385         return 0;
8386 }
8387
8388 static u32 bnx2x_get_msglevel(struct net_device *dev)
8389 {
8390         struct bnx2x *bp = netdev_priv(dev);
8391
8392         return bp->msglevel;
8393 }
8394
8395 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8396 {
8397         struct bnx2x *bp = netdev_priv(dev);
8398
8399         if (capable(CAP_NET_ADMIN))
8400                 bp->msglevel = level;
8401 }
8402
8403 static int bnx2x_nway_reset(struct net_device *dev)
8404 {
8405         struct bnx2x *bp = netdev_priv(dev);
8406
8407         if (!bp->port.pmf)
8408                 return 0;
8409
8410         if (netif_running(dev)) {
8411                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8412                 bnx2x_link_set(bp);
8413         }
8414
8415         return 0;
8416 }
8417
8418 static int bnx2x_get_eeprom_len(struct net_device *dev)
8419 {
8420         struct bnx2x *bp = netdev_priv(dev);
8421
8422         return bp->common.flash_size;
8423 }
8424
8425 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8426 {
8427         int port = BP_PORT(bp);
8428         int count, i;
8429         u32 val = 0;
8430
8431         /* adjust timeout for emulation/FPGA */
8432         count = NVRAM_TIMEOUT_COUNT;
8433         if (CHIP_REV_IS_SLOW(bp))
8434                 count *= 100;
8435
8436         /* request access to nvram interface */
8437         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8438                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8439
8440         for (i = 0; i < count*10; i++) {
8441                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8442                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8443                         break;
8444
8445                 udelay(5);
8446         }
8447
8448         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8449                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8450                 return -EBUSY;
8451         }
8452
8453         return 0;
8454 }
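/* The NVRAM is shared between the two ports, so access is serialized
 * through the MCPR_NVM_SW_ARB register: setting ARB_REQ_SET1 << port
 * requests ownership, and the loop above polls the matching ARB_ARB1 bit
 * until the arbiter grants it (or the count*10 polls of 5 us each run
 * out).  bnx2x_release_nvram_lock() below clears the request again.
 */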
8455
8456 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8457 {
8458         int port = BP_PORT(bp);
8459         int count, i;
8460         u32 val = 0;
8461
8462         /* adjust timeout for emulation/FPGA */
8463         count = NVRAM_TIMEOUT_COUNT;
8464         if (CHIP_REV_IS_SLOW(bp))
8465                 count *= 100;
8466
8467         /* relinquish nvram interface */
8468         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8469                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8470
8471         for (i = 0; i < count*10; i++) {
8472                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8473                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8474                         break;
8475
8476                 udelay(5);
8477         }
8478
8479         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8480                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8481                 return -EBUSY;
8482         }
8483
8484         return 0;
8485 }
8486
8487 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8488 {
8489         u32 val;
8490
8491         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8492
8493         /* enable both bits, even on read */
8494         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8495                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8496                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8497 }
8498
8499 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8500 {
8501         u32 val;
8502
8503         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8504
8505         /* disable both bits, even after read */
8506         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8507                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8508                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8509 }
8510
8511 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8512                                   u32 cmd_flags)
8513 {
8514         int count, i, rc;
8515         u32 val;
8516
8517         /* build the command word */
8518         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8519
8520         /* need to clear DONE bit separately */
8521         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8522
8523         /* address of the NVRAM to read from */
8524         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8525                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8526
8527         /* issue a read command */
8528         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8529
8530         /* adjust timeout for emulation/FPGA */
8531         count = NVRAM_TIMEOUT_COUNT;
8532         if (CHIP_REV_IS_SLOW(bp))
8533                 count *= 100;
8534
8535         /* wait for completion */
8536         *ret_val = 0;
8537         rc = -EBUSY;
8538         for (i = 0; i < count; i++) {
8539                 udelay(5);
8540                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8541
8542                 if (val & MCPR_NVM_COMMAND_DONE) {
8543                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8544                         /* we read nvram data in cpu order,
8545                          * but ethtool sees it as an array of bytes;
8546                          * converting to big-endian will do the work */
8547                         val = cpu_to_be32(val);
8548                         *ret_val = val;
8549                         rc = 0;
8550                         break;
8551                 }
8552         }
8553
8554         return rc;
8555 }
8556
8557 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8558                             int buf_size)
8559 {
8560         int rc;
8561         u32 cmd_flags;
8562         u32 val;
8563
8564         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8565                 DP(BNX2X_MSG_NVM,
8566                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8567                    offset, buf_size);
8568                 return -EINVAL;
8569         }
8570
8571         if (offset + buf_size > bp->common.flash_size) {
8572                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8573                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8574                    offset, buf_size, bp->common.flash_size);
8575                 return -EINVAL;
8576         }
8577
8578         /* request access to nvram interface */
8579         rc = bnx2x_acquire_nvram_lock(bp);
8580         if (rc)
8581                 return rc;
8582
8583         /* enable access to nvram interface */
8584         bnx2x_enable_nvram_access(bp);
8585
8586         /* read the first word(s) */
8587         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8588         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8589                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8590                 memcpy(ret_buf, &val, 4);
8591
8592                 /* advance to the next dword */
8593                 offset += sizeof(u32);
8594                 ret_buf += sizeof(u32);
8595                 buf_size -= sizeof(u32);
8596                 cmd_flags = 0;
8597         }
8598
8599         if (rc == 0) {
8600                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8601                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8602                 memcpy(ret_buf, &val, 4);
8603         }
8604
8605         /* disable access to nvram interface */
8606         bnx2x_disable_nvram_access(bp);
8607         bnx2x_release_nvram_lock(bp);
8608
8609         return rc;
8610 }
8611
8612 static int bnx2x_get_eeprom(struct net_device *dev,
8613                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8614 {
8615         struct bnx2x *bp = netdev_priv(dev);
8616         int rc;
8617
8618         if (!netif_running(dev))
8619                 return -EAGAIN;
8620
8621         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8622            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8623            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8624            eeprom->len, eeprom->len);
8625
8626         /* parameters already validated in ethtool_get_eeprom */
8627
8628         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8629
8630         return rc;
8631 }
8632
8633 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8634                                    u32 cmd_flags)
8635 {
8636         int count, i, rc;
8637
8638         /* build the command word */
8639         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8640
8641         /* need to clear DONE bit separately */
8642         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8643
8644         /* write the data */
8645         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8646
8647         /* address of the NVRAM to write to */
8648         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8649                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8650
8651         /* issue the write command */
8652         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8653
8654         /* adjust timeout for emulation/FPGA */
8655         count = NVRAM_TIMEOUT_COUNT;
8656         if (CHIP_REV_IS_SLOW(bp))
8657                 count *= 100;
8658
8659         /* wait for completion */
8660         rc = -EBUSY;
8661         for (i = 0; i < count; i++) {
8662                 udelay(5);
8663                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8664                 if (val & MCPR_NVM_COMMAND_DONE) {
8665                         rc = 0;
8666                         break;
8667                 }
8668         }
8669
8670         return rc;
8671 }
8672
8673 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8674
8675 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8676                               int buf_size)
8677 {
8678         int rc;
8679         u32 cmd_flags;
8680         u32 align_offset;
8681         u32 val;
8682
8683         if (offset + buf_size > bp->common.flash_size) {
8684                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8685                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8686                    offset, buf_size, bp->common.flash_size);
8687                 return -EINVAL;
8688         }
8689
8690         /* request access to nvram interface */
8691         rc = bnx2x_acquire_nvram_lock(bp);
8692         if (rc)
8693                 return rc;
8694
8695         /* enable access to nvram interface */
8696         bnx2x_enable_nvram_access(bp);
8697
8698         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8699         align_offset = (offset & ~0x03);
8700         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8701
8702         if (rc == 0) {
8703                 val &= ~(0xff << BYTE_OFFSET(offset));
8704                 val |= (*data_buf << BYTE_OFFSET(offset));
8705
8706                 /* nvram data is returned as an array of bytes;
8707                  * convert it back to cpu order */
8708                 val = be32_to_cpu(val);
8709
8710                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8711                                              cmd_flags);
8712         }
8713
8714         /* disable access to nvram interface */
8715         bnx2x_disable_nvram_access(bp);
8716         bnx2x_release_nvram_lock(bp);
8717
8718         return rc;
8719 }
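/* Worked example for the single-byte path above (hypothetical offset):
 * writing one byte at offset 0x105 gives align_offset = 0x104 and
 * BYTE_OFFSET(0x105) = 8 * (0x105 & 3) = 8, so the aligned dword is read,
 * bits 15:8 are masked out and replaced with the new byte, and the dword
 * is written back - a read-modify-write that never disturbs the three
 * neighbouring bytes.
 */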
8720
8721 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8722                              int buf_size)
8723 {
8724         int rc;
8725         u32 cmd_flags;
8726         u32 val;
8727         u32 written_so_far;
8728
8729         if (buf_size == 1)      /* ethtool */
8730                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8731
8732         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8733                 DP(BNX2X_MSG_NVM,
8734                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8735                    offset, buf_size);
8736                 return -EINVAL;
8737         }
8738
8739         if (offset + buf_size > bp->common.flash_size) {
8740                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8741                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8742                    offset, buf_size, bp->common.flash_size);
8743                 return -EINVAL;
8744         }
8745
8746         /* request access to nvram interface */
8747         rc = bnx2x_acquire_nvram_lock(bp);
8748         if (rc)
8749                 return rc;
8750
8751         /* enable access to nvram interface */
8752         bnx2x_enable_nvram_access(bp);
8753
8754         written_so_far = 0;
8755         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8756         while ((written_so_far < buf_size) && (rc == 0)) {
8757                 if (written_so_far == (buf_size - sizeof(u32)))
8758                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8759                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8760                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8761                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8762                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
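                /* Framing rules implemented by the chain above: the first
                 * dword of the transfer carries FIRST, the final dword of
                 * the buffer or the last dword of an NVRAM page carries
                 * LAST, and the dword that starts a new page carries FIRST
                 * again - every page-sized burst is bracketed by a
                 * FIRST/LAST pair.
                 */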
8763
8764                 memcpy(&val, data_buf, 4);
8765
8766                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8767
8768                 /* advance to the next dword */
8769                 offset += sizeof(u32);
8770                 data_buf += sizeof(u32);
8771                 written_so_far += sizeof(u32);
8772                 cmd_flags = 0;
8773         }
8774
8775         /* disable access to nvram interface */
8776         bnx2x_disable_nvram_access(bp);
8777         bnx2x_release_nvram_lock(bp);
8778
8779         return rc;
8780 }
8781
8782 static int bnx2x_set_eeprom(struct net_device *dev,
8783                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8784 {
8785         struct bnx2x *bp = netdev_priv(dev);
8786         int rc;
8787
8788         if (!netif_running(dev))
8789                 return -EAGAIN;
8790
8791         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8792            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8793            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8794            eeprom->len, eeprom->len);
8795
8796         /* parameters already validated in ethtool_set_eeprom */
8797
8798         /* If the magic number is PHY (0x00504859, ASCII "PHY") upgrade the PHY FW */
8799         if (eeprom->magic == 0x00504859)
8800                 if (bp->port.pmf) {
8801
8802                         bnx2x_acquire_phy_lock(bp);
8803                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8804                                              bp->link_params.ext_phy_config,
8805                                              (bp->state != BNX2X_STATE_CLOSED),
8806                                              eebuf, eeprom->len);
8807                         if ((bp->state == BNX2X_STATE_OPEN) ||
8808                             (bp->state == BNX2X_STATE_DISABLED)) {
8809                                 rc |= bnx2x_link_reset(&bp->link_params,
8810                                                        &bp->link_vars);
8811                                 rc |= bnx2x_phy_init(&bp->link_params,
8812                                                      &bp->link_vars);
8813                         }
8814                         bnx2x_release_phy_lock(bp);
8815
8816                 } else /* Only the PMF can access the PHY */
8817                         return -EINVAL;
8818         else
8819                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8820
8821         return rc;
8822 }
8823
8824 static int bnx2x_get_coalesce(struct net_device *dev,
8825                               struct ethtool_coalesce *coal)
8826 {
8827         struct bnx2x *bp = netdev_priv(dev);
8828
8829         memset(coal, 0, sizeof(struct ethtool_coalesce));
8830
8831         coal->rx_coalesce_usecs = bp->rx_ticks;
8832         coal->tx_coalesce_usecs = bp->tx_ticks;
8833
8834         return 0;
8835 }
8836
8837 static int bnx2x_set_coalesce(struct net_device *dev,
8838                               struct ethtool_coalesce *coal)
8839 {
8840         struct bnx2x *bp = netdev_priv(dev);
8841
8842         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8843         if (bp->rx_ticks > 3000)
8844                 bp->rx_ticks = 3000;
8845
8846         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8847         if (bp->tx_ticks > 3000)
8848                 bp->tx_ticks = 3000;
8849
8850         if (netif_running(dev))
8851                 bnx2x_update_coalesce(bp);
8852
8853         return 0;
8854 }
8855
8856 static void bnx2x_get_ringparam(struct net_device *dev,
8857                                 struct ethtool_ringparam *ering)
8858 {
8859         struct bnx2x *bp = netdev_priv(dev);
8860
8861         ering->rx_max_pending = MAX_RX_AVAIL;
8862         ering->rx_mini_max_pending = 0;
8863         ering->rx_jumbo_max_pending = 0;
8864
8865         ering->rx_pending = bp->rx_ring_size;
8866         ering->rx_mini_pending = 0;
8867         ering->rx_jumbo_pending = 0;
8868
8869         ering->tx_max_pending = MAX_TX_AVAIL;
8870         ering->tx_pending = bp->tx_ring_size;
8871 }
8872
8873 static int bnx2x_set_ringparam(struct net_device *dev,
8874                                struct ethtool_ringparam *ering)
8875 {
8876         struct bnx2x *bp = netdev_priv(dev);
8877         int rc = 0;
8878
8879         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8880             (ering->tx_pending > MAX_TX_AVAIL) ||
8881             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8882                 return -EINVAL;
8883
8884         bp->rx_ring_size = ering->rx_pending;
8885         bp->tx_ring_size = ering->tx_pending;
8886
8887         if (netif_running(dev)) {
8888                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8889                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8890         }
8891
8892         return rc;
8893 }
8894
8895 static void bnx2x_get_pauseparam(struct net_device *dev,
8896                                  struct ethtool_pauseparam *epause)
8897 {
8898         struct bnx2x *bp = netdev_priv(dev);
8899
8900         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8901                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8902
8903         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8904                             BNX2X_FLOW_CTRL_RX);
8905         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8906                             BNX2X_FLOW_CTRL_TX);
8907
8908         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8909            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8910            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8911 }
8912
8913 static int bnx2x_set_pauseparam(struct net_device *dev,
8914                                 struct ethtool_pauseparam *epause)
8915 {
8916         struct bnx2x *bp = netdev_priv(dev);
8917
8918         if (IS_E1HMF(bp))
8919                 return 0;
8920
8921         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8922            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8923            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8924
8925         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8926
8927         if (epause->rx_pause)
8928                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8929
8930         if (epause->tx_pause)
8931                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8932
8933         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8934                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8935
8936         if (epause->autoneg) {
8937                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8938                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8939                         return -EINVAL;
8940                 }
8941
8942                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8943                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8944         }
8945
8946         DP(NETIF_MSG_LINK,
8947            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8948
8949         if (netif_running(dev)) {
8950                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8951                 bnx2x_link_set(bp);
8952         }
8953
8954         return 0;
8955 }
8956
8957 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8958 {
8959         struct bnx2x *bp = netdev_priv(dev);
8960         int changed = 0;
8961         int rc = 0;
8962
8963         /* TPA requires Rx CSUM offloading */
8964         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8965                 if (!(dev->features & NETIF_F_LRO)) {
8966                         dev->features |= NETIF_F_LRO;
8967                         bp->flags |= TPA_ENABLE_FLAG;
8968                         changed = 1;
8969                 }
8970
8971         } else if (dev->features & NETIF_F_LRO) {
8972                 dev->features &= ~NETIF_F_LRO;
8973                 bp->flags &= ~TPA_ENABLE_FLAG;
8974                 changed = 1;
8975         }
8976
8977         if (changed && netif_running(dev)) {
8978                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8979                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8980         }
8981
8982         return rc;
8983 }
8984
8985 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8986 {
8987         struct bnx2x *bp = netdev_priv(dev);
8988
8989         return bp->rx_csum;
8990 }
8991
8992 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8993 {
8994         struct bnx2x *bp = netdev_priv(dev);
8995         int rc = 0;
8996
8997         bp->rx_csum = data;
8998
8999         /* Disable TPA when Rx CSUM is disabled; otherwise all
9000            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9001         if (!data) {
9002                 u32 flags = ethtool_op_get_flags(dev);
9003
9004                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9005         }
9006
9007         return rc;
9008 }
9009
9010 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9011 {
9012         if (data) {
9013                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9014                 dev->features |= NETIF_F_TSO6;
9015         } else {
9016                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9017                 dev->features &= ~NETIF_F_TSO6;
9018         }
9019
9020         return 0;
9021 }
9022
9023 static const struct {
9024         char string[ETH_GSTRING_LEN];
9025 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9026         { "register_test (offline)" },
9027         { "memory_test (offline)" },
9028         { "loopback_test (offline)" },
9029         { "nvram_test (online)" },
9030         { "interrupt_test (online)" },
9031         { "link_test (online)" },
9032         { "idle check (online)" }
9033 };
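/* The order of bnx2x_tests_str_arr matches the buf[] indices filled in by
 * bnx2x_self_test() below: buf[0] registers, buf[1] memory, buf[2]
 * loopback, buf[3] nvram, buf[4] interrupt, buf[5] link.
 */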
9034
9035 static int bnx2x_self_test_count(struct net_device *dev)
9036 {
9037         return BNX2X_NUM_TESTS;
9038 }
9039
9040 static int bnx2x_test_registers(struct bnx2x *bp)
9041 {
9042         int idx, i, rc = -ENODEV;
9043         u32 wr_val = 0;
9044         int port = BP_PORT(bp);
9045         static const struct {
9046                 u32  offset0;
9047                 u32  offset1;
9048                 u32  mask;
9049         } reg_tbl[] = {
9050 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9051                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9052                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9053                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9054                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9055                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9056                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9057                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9058                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9059                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9060 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9061                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9062                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9063                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9064                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9065                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9066                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9067                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9068                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9069                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9070 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9071                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9072                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9073                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9074                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9075                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9076                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9077                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9078                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9079                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9080 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9081                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9082                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9083                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9084                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9085                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9086                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9087                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9088
9089                 { 0xffffffff, 0, 0x00000000 }
9090         };
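        /* Table semantics: offset0 is the port 0 copy of the register,
         * offset1 the per-port stride (the loop below accesses
         * offset0 + port*offset1), and mask selects the implemented bits
         * that must read back as written.  The 0xffffffff offset0 entry is
         * the end-of-table sentinel.
         */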
9091
9092         if (!netif_running(bp->dev))
9093                 return rc;
9094
9095         /* Repeat the test twice:
9096            first by writing 0x00000000, then by writing 0xffffffff */
9097         for (idx = 0; idx < 2; idx++) {
9098
9099                 switch (idx) {
9100                 case 0:
9101                         wr_val = 0;
9102                         break;
9103                 case 1:
9104                         wr_val = 0xffffffff;
9105                         break;
9106                 }
9107
9108                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9109                         u32 offset, mask, save_val, val;
9110
9111                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9112                         mask = reg_tbl[i].mask;
9113
9114                         save_val = REG_RD(bp, offset);
9115
9116                         REG_WR(bp, offset, wr_val);
9117                         val = REG_RD(bp, offset);
9118
9119                         /* Restore the original register's value */
9120                         REG_WR(bp, offset, save_val);
9121
9122                         /* verify that the value is as expected */
9123                         if ((val & mask) != (wr_val & mask))
9124                                 goto test_reg_exit;
9125                 }
9126         }
9127
9128         rc = 0;
9129
9130 test_reg_exit:
9131         return rc;
9132 }
9133
9134 static int bnx2x_test_memory(struct bnx2x *bp)
9135 {
9136         int i, j, rc = -ENODEV;
9137         u32 val;
9138         static const struct {
9139                 u32 offset;
9140                 int size;
9141         } mem_tbl[] = {
9142                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9143                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9144                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9145                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9146                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9147                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9148                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9149
9150                 { 0xffffffff, 0 }
9151         };
9152         static const struct {
9153                 char *name;
9154                 u32 offset;
9155                 u32 e1_mask;
9156                 u32 e1h_mask;
9157         } prty_tbl[] = {
9158                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9159                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9160                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9161                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9162                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9163                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9164
9165                 { NULL, 0xffffffff, 0, 0 }
9166         };
9167
9168         if (!netif_running(bp->dev))
9169                 return rc;
9170
9171         /* Go through all the memories */
9172         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9173                 for (j = 0; j < mem_tbl[i].size; j++)
9174                         REG_RD(bp, mem_tbl[i].offset + j*4);
9175
9176         /* Check the parity status */
9177         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9178                 val = REG_RD(bp, prty_tbl[i].offset);
9179                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9180                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9181                         DP(NETIF_MSG_HW,
9182                            "%s is 0x%x\n", prty_tbl[i].name, val);
9183                         goto test_mem_exit;
9184                 }
9185         }
9186
9187         rc = 0;
9188
9189 test_mem_exit:
9190         return rc;
9191 }
9192
9193 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9194 {
9195         int cnt = 1000;
9196
9197         if (link_up)
9198                 while (bnx2x_link_test(bp) && cnt--)
9199                         msleep(10);
9200 }
9201
9202 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9203 {
9204         unsigned int pkt_size, num_pkts, i;
9205         struct sk_buff *skb;
9206         unsigned char *packet;
9207         struct bnx2x_fastpath *fp = &bp->fp[0];
9208         u16 tx_start_idx, tx_idx;
9209         u16 rx_start_idx, rx_idx;
9210         u16 pkt_prod;
9211         struct sw_tx_bd *tx_buf;
9212         struct eth_tx_bd *tx_bd;
9213         dma_addr_t mapping;
9214         union eth_rx_cqe *cqe;
9215         u8 cqe_fp_flags;
9216         struct sw_rx_bd *rx_buf;
9217         u16 len;
9218         int rc = -ENODEV;
9219
9220         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9221                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9222                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9223
9224         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
9225                 u16 cnt = 1000;
9226                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
9227                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9228                 /* wait until link state is restored */
9229                 if (link_up)
9230                         while (cnt-- && bnx2x_test_link(&bp->link_params,
9231                                                         &bp->link_vars))
9232                                 msleep(10);
9233         } else
9234                 return -EINVAL;
9235
9236         pkt_size = 1514;
9237         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9238         if (!skb) {
9239                 rc = -ENOMEM;
9240                 goto test_loopback_exit;
9241         }
9242         packet = skb_put(skb, pkt_size);
9243         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9244         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9245         for (i = ETH_HLEN; i < pkt_size; i++)
9246                 packet[i] = (unsigned char) (i & 0xff);
9247
9248         num_pkts = 0;
9249         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9250         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9251
9252         pkt_prod = fp->tx_pkt_prod++;
9253         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9254         tx_buf->first_bd = fp->tx_bd_prod;
9255         tx_buf->skb = skb;
9256
9257         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9258         mapping = pci_map_single(bp->pdev, skb->data,
9259                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9260         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9261         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9262         tx_bd->nbd = cpu_to_le16(1);
9263         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9264         tx_bd->vlan = cpu_to_le16(pkt_prod);
9265         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9266                                        ETH_TX_BD_FLAGS_END_BD);
9267         tx_bd->general_data = ((UNICAST_ADDRESS <<
9268                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9269
9270         wmb();
9271
9272         fp->hw_tx_prods->bds_prod =
9273                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9274         mb(); /* FW restriction: must not reorder writing nbd and packets */
9275         fp->hw_tx_prods->packets_prod =
9276                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9277         DOORBELL(bp, FP_IDX(fp), 0);
9278
9279         mmiowb();
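        /* Ordering above: wmb() makes the BD contents globally visible
         * before the producer updates, the mb() enforces the stated FW
         * requirement that bds_prod is written before packets_prod, and
         * mmiowb() keeps the doorbell MMIO write ordered ahead of any
         * subsequent lock release on architectures that need it.
         */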
9280
9281         num_pkts++;
9282         fp->tx_bd_prod++;
9283         bp->dev->trans_start = jiffies;
9284
9285         udelay(100);
9286
9287         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9288         if (tx_idx != tx_start_idx + num_pkts)
9289                 goto test_loopback_exit;
9290
9291         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9292         if (rx_idx != rx_start_idx + num_pkts)
9293                 goto test_loopback_exit;
9294
9295         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9296         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9297         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9298                 goto test_loopback_rx_exit;
9299
9300         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9301         if (len != pkt_size)
9302                 goto test_loopback_rx_exit;
9303
9304         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9305         skb = rx_buf->skb;
9306         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9307         for (i = ETH_HLEN; i < pkt_size; i++)
9308                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9309                         goto test_loopback_rx_exit;
9310
9311         rc = 0;
9312
9313 test_loopback_rx_exit:
9314
9315         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9316         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9317         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9318         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9319
9320         /* Update producers */
9321         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9322                              fp->rx_sge_prod);
9323
9324 test_loopback_exit:
9325         bp->link_params.loopback_mode = LOOPBACK_NONE;
9326
9327         return rc;
9328 }
9329
9330 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9331 {
9332         int rc = 0;
9333
9334         if (!netif_running(bp->dev))
9335                 return BNX2X_LOOPBACK_FAILED;
9336
9337         bnx2x_netif_stop(bp, 1);
9338         bnx2x_acquire_phy_lock(bp);
9339
9340         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9341                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9342                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9343         }
9344
9345         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9346                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9347                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9348         }
9349
9350         bnx2x_release_phy_lock(bp);
9351         bnx2x_netif_start(bp);
9352
9353         return rc;
9354 }
9355
9356 #define CRC32_RESIDUAL                  0xdebb20e3
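/* 0xdebb20e3 is the well-known CRC-32 residue: running the CRC over a
 * block that already ends with its own (little-endian) CRC yields this
 * constant, so every region in nvram_tbl below can be validated without
 * knowing where its stored checksum sits.
 */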
9357
9358 static int bnx2x_test_nvram(struct bnx2x *bp)
9359 {
9360         static const struct {
9361                 int offset;
9362                 int size;
9363         } nvram_tbl[] = {
9364                 {     0,  0x14 }, /* bootstrap */
9365                 {  0x14,  0xec }, /* dir */
9366                 { 0x100, 0x350 }, /* manuf_info */
9367                 { 0x450,  0xf0 }, /* feature_info */
9368                 { 0x640,  0x64 }, /* upgrade_key_info */
9369                 { 0x6a4,  0x64 },
9370                 { 0x708,  0x70 }, /* manuf_key_info */
9371                 { 0x778,  0x70 },
9372                 {     0,     0 }
9373         };
9374         u32 buf[0x350 / 4];
9375         u8 *data = (u8 *)buf;
9376         int i, rc;
9377         u32 magic, csum;
9378
9379         rc = bnx2x_nvram_read(bp, 0, data, 4);
9380         if (rc) {
9381                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9382                 goto test_nvram_exit;
9383         }
9384
9385         magic = be32_to_cpu(buf[0]);
9386         if (magic != 0x669955aa) {
9387                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9388                 rc = -ENODEV;
9389                 goto test_nvram_exit;
9390         }
9391
9392         for (i = 0; nvram_tbl[i].size; i++) {
9393
9394                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9395                                       nvram_tbl[i].size);
9396                 if (rc) {
9397                         DP(NETIF_MSG_PROBE,
9398                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9399                         goto test_nvram_exit;
9400                 }
9401
9402                 csum = ether_crc_le(nvram_tbl[i].size, data);
9403                 if (csum != CRC32_RESIDUAL) {
9404                         DP(NETIF_MSG_PROBE,
9405                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9406                         rc = -ENODEV;
9407                         goto test_nvram_exit;
9408                 }
9409         }
9410
9411 test_nvram_exit:
9412         return rc;
9413 }
9414
9415 static int bnx2x_test_intr(struct bnx2x *bp)
9416 {
9417         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9418         int i, rc;
9419
9420         if (!netif_running(bp->dev))
9421                 return -ENODEV;
9422
9423         config->hdr.length = 0;
9424         if (CHIP_IS_E1(bp))
9425                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9426         else
9427                 config->hdr.offset = BP_FUNC(bp);
9428         config->hdr.client_id = BP_CL_ID(bp);
9429         config->hdr.reserved1 = 0;
9430
9431         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9432                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9433                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9434         if (rc == 0) {
9435                 bp->set_mac_pending++;
9436                 for (i = 0; i < 10; i++) {
9437                         if (!bp->set_mac_pending)
9438                                 break;
9439                         msleep_interruptible(10);
9440                 }
9441                 if (i == 10)
9442                         rc = -ENODEV;
9443         }
9444
9445         return rc;
9446 }
9447
9448 static void bnx2x_self_test(struct net_device *dev,
9449                             struct ethtool_test *etest, u64 *buf)
9450 {
9451         struct bnx2x *bp = netdev_priv(dev);
9452
9453         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9454
9455         if (!netif_running(dev))
9456                 return;
9457
9458         /* offline tests are not supported in MF mode */
9459         if (IS_E1HMF(bp))
9460                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9461
9462         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9463                 u8 link_up;
9464
9465                 link_up = bp->link_vars.link_up;
9466                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9467                 bnx2x_nic_load(bp, LOAD_DIAG);
9468                 /* wait until link state is restored */
9469                 bnx2x_wait_for_link(bp, link_up);
9470
9471                 if (bnx2x_test_registers(bp) != 0) {
9472                         buf[0] = 1;
9473                         etest->flags |= ETH_TEST_FL_FAILED;
9474                 }
9475                 if (bnx2x_test_memory(bp) != 0) {
9476                         buf[1] = 1;
9477                         etest->flags |= ETH_TEST_FL_FAILED;
9478                 }
9479                 buf[2] = bnx2x_test_loopback(bp, link_up);
9480                 if (buf[2] != 0)
9481                         etest->flags |= ETH_TEST_FL_FAILED;
9482
9483                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9484                 bnx2x_nic_load(bp, LOAD_NORMAL);
9485                 /* wait until link state is restored */
9486                 bnx2x_wait_for_link(bp, link_up);
9487         }
9488         if (bnx2x_test_nvram(bp) != 0) {
9489                 buf[3] = 1;
9490                 etest->flags |= ETH_TEST_FL_FAILED;
9491         }
9492         if (bnx2x_test_intr(bp) != 0) {
9493                 buf[4] = 1;
9494                 etest->flags |= ETH_TEST_FL_FAILED;
9495         }
9496         if (bp->port.pmf)
9497                 if (bnx2x_link_test(bp) != 0) {
9498                         buf[5] = 1;
9499                         etest->flags |= ETH_TEST_FL_FAILED;
9500                 }
9501
9502 #ifdef BNX2X_EXTRA_DEBUG
9503         bnx2x_panic_dump(bp);
9504 #endif
9505 }
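/* This is the ethtool self-test hook, e.g. "ethtool -t eth0 offline".
 * buf[0..5] map to the registers, memory, loopback, nvram, interrupt
 * and link tests in that order; the offline tests (0-2) reload the
 * NIC in diagnostic mode around the run.
 */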
9506
9507 static const struct {
9508         long offset;
9509         int size;
9510         u8 string[ETH_GSTRING_LEN];
9511 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9512 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9513         { Q_STATS_OFFSET32(error_bytes_received_hi),
9514                                                 8, "[%d]: rx_error_bytes" },
9515         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9516                                                 8, "[%d]: rx_ucast_packets" },
9517         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9518                                                 8, "[%d]: rx_mcast_packets" },
9519         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9520                                                 8, "[%d]: rx_bcast_packets" },
9521         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9522         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9523                                          4, "[%d]: rx_phy_ip_err_discards"},
9524         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9525                                          4, "[%d]: rx_skb_alloc_discard" },
9526         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9527
9528 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9529         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9530                                                         8, "[%d]: tx_packets" }
9531 };
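/* For the stats tables here and below, size selects how
 * bnx2x_get_ethtool_stats() reads each counter: 8 means a 64-bit
 * value kept as a hi/lo pair of u32s (combined with HILO_U64),
 * 4 a plain u32, and 0 a placeholder reported as zero.
 */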
9532
9533 static const struct {
9534         long offset;
9535         int size;
9536         u32 flags;
9537 #define STATS_FLAGS_PORT                1
9538 #define STATS_FLAGS_FUNC                2
9539 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9540         u8 string[ETH_GSTRING_LEN];
9541 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9542 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9543                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9544         { STATS_OFFSET32(error_bytes_received_hi),
9545                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9546         { STATS_OFFSET32(total_unicast_packets_received_hi),
9547                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9548         { STATS_OFFSET32(total_multicast_packets_received_hi),
9549                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9550         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9551                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9552         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9553                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9554         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9555                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9556         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9557                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9558         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9559                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9560 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9561                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9562         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9563                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9564         { STATS_OFFSET32(no_buff_discard_hi),
9565                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9566         { STATS_OFFSET32(mac_filter_discard),
9567                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9568         { STATS_OFFSET32(xxoverflow_discard),
9569                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9570         { STATS_OFFSET32(brb_drop_hi),
9571                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9572         { STATS_OFFSET32(brb_truncate_hi),
9573                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9574         { STATS_OFFSET32(pause_frames_received_hi),
9575                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9576         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9577                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9578         { STATS_OFFSET32(nig_timer_max),
9579                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9580 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9581                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9582         { STATS_OFFSET32(rx_skb_alloc_failed),
9583                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9584         { STATS_OFFSET32(hw_csum_err),
9585                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9586
9587         { STATS_OFFSET32(total_bytes_transmitted_hi),
9588                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9589         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9590                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9591         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9592                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9593         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9594                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9595         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9596                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9597         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9598                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9599         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9600                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9601 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9602                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9603         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9604                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9605         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9606                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9607         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9608                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9609         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9610                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9611         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9612                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9613         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9614                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9615         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9616                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9617         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9618                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9619         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9620                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9621 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9622                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9623         { STATS_OFFSET32(pause_frames_sent_hi),
9624                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9625 };
9626
9627 #define IS_PORT_STAT(i) \
9628         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9629 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9630 #define IS_E1HMF_MODE_STAT(bp) \
9631                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
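/* Note: in E1H multi-function mode the physical port is shared
 * between functions, so port-wide counters are normally hidden from
 * a single function's view; setting the BNX2X_MSG_STATS debug level
 * exposes them anyway.
 */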
9632
9633 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9634 {
9635         struct bnx2x *bp = netdev_priv(dev);
9636         int i, j, k;
9637
9638         switch (stringset) {
9639         case ETH_SS_STATS:
9640                 if (is_multi(bp)) {
9641                         k = 0;
9642                         for_each_queue(bp, i) {
9643                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9644                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9645                                                 bnx2x_q_stats_arr[j].string, i);
9646                                 k += BNX2X_NUM_Q_STATS;
9647                         }
9648                         if (IS_E1HMF_MODE_STAT(bp))
9649                                 break;
9650                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9651                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9652                                        bnx2x_stats_arr[j].string);
9653                 } else {
9654                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9655                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9656                                         continue;
9657                                 strcpy(buf + j*ETH_GSTRING_LEN,
9658                                        bnx2x_stats_arr[i].string);
9659                                 j++;
9660                         }
9661                 }
9662                 break;
9663
9664         case ETH_SS_TEST:
9665                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9666                 break;
9667         }
9668 }
9669
9670 static int bnx2x_get_stats_count(struct net_device *dev)
9671 {
9672         struct bnx2x *bp = netdev_priv(dev);
9673         int i, num_stats;
9674
9675         if (is_multi(bp)) {
9676                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9677                 if (!IS_E1HMF_MODE_STAT(bp))
9678                         num_stats += BNX2X_NUM_STATS;
9679         } else {
9680                 if (IS_E1HMF_MODE_STAT(bp)) {
9681                         num_stats = 0;
9682                         for (i = 0; i < BNX2X_NUM_STATS; i++)
9683                                 if (IS_FUNC_STAT(i))
9684                                         num_stats++;
9685                 } else
9686                         num_stats = BNX2X_NUM_STATS;
9687         }
9688
9689         return num_stats;
9690 }
9691
9692 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9693                                     struct ethtool_stats *stats, u64 *buf)
9694 {
9695         struct bnx2x *bp = netdev_priv(dev);
9696         u32 *hw_stats, *offset;
9697         int i, j, k;
9698
9699         if (is_multi(bp)) {
9700                 k = 0;
9701                 for_each_queue(bp, i) {
9702                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9703                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9704                                 if (bnx2x_q_stats_arr[j].size == 0) {
9705                                         /* skip this counter */
9706                                         buf[k + j] = 0;
9707                                         continue;
9708                                 }
9709                                 offset = (hw_stats +
9710                                           bnx2x_q_stats_arr[j].offset);
9711                                 if (bnx2x_q_stats_arr[j].size == 4) {
9712                                         /* 4-byte counter */
9713                                         buf[k + j] = (u64) *offset;
9714                                         continue;
9715                                 }
9716                                 /* 8-byte counter */
9717                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9718                         }
9719                         k += BNX2X_NUM_Q_STATS;
9720                 }
9721                 if (IS_E1HMF_MODE_STAT(bp))
9722                         return;
9723                 hw_stats = (u32 *)&bp->eth_stats;
9724                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9725                         if (bnx2x_stats_arr[j].size == 0) {
9726                                 /* skip this counter */
9727                                 buf[k + j] = 0;
9728                                 continue;
9729                         }
9730                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
9731                         if (bnx2x_stats_arr[j].size == 4) {
9732                                 /* 4-byte counter */
9733                                 buf[k + j] = (u64) *offset;
9734                                 continue;
9735                         }
9736                         /* 8-byte counter */
9737                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
9738                 }
9739         } else {
9740                 hw_stats = (u32 *)&bp->eth_stats;
9741                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9742                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9743                                 continue;
9744                         if (bnx2x_stats_arr[i].size == 0) {
9745                                 /* skip this counter */
9746                                 buf[j] = 0;
9747                                 j++;
9748                                 continue;
9749                         }
9750                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
9751                         if (bnx2x_stats_arr[i].size == 4) {
9752                                 /* 4-byte counter */
9753                                 buf[j] = (u64) *offset;
9754                                 j++;
9755                                 continue;
9756                         }
9757                         /* 8-byte counter */
9758                         buf[j] = HILO_U64(*offset, *(offset + 1));
9759                         j++;
9760                 }
9761         }
9762 }
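/* The value layout built above must match the string layout from
 * bnx2x_get_strings() index for index - ethtool ("ethtool -S eth0")
 * simply pairs the two arrays together.
 */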
9763
9764 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9765 {
9766         struct bnx2x *bp = netdev_priv(dev);
9767         int port = BP_PORT(bp);
9768         int i;
9769
9770         if (!netif_running(dev))
9771                 return 0;
9772
9773         if (!bp->port.pmf)
9774                 return 0;
9775
9776         if (data == 0)
9777                 data = 2;
9778
9779         for (i = 0; i < (data * 2); i++) {
9780                 if ((i % 2) == 0)
9781                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9782                                       bp->link_params.hw_led_mode,
9783                                       bp->link_params.chip_id);
9784                 else
9785                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9786                                       bp->link_params.hw_led_mode,
9787                                       bp->link_params.chip_id);
9788
9789                 msleep_interruptible(500);
9790                 if (signal_pending(current))
9791                         break;
9792         }
9793
9794         if (bp->link_vars.link_up)
9795                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9796                               bp->link_vars.line_speed,
9797                               bp->link_params.hw_led_mode,
9798                               bp->link_params.chip_id);
9799
9800         return 0;
9801 }
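/* e.g. "ethtool -p eth0 5" arrives here with data == 5: the LED is
 * toggled every 500ms for data*2 iterations, i.e. about data seconds
 * of blinking, after which it is restored to match the link state.
 */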
9802
9803 static struct ethtool_ops bnx2x_ethtool_ops = {
9804         .get_settings           = bnx2x_get_settings,
9805         .set_settings           = bnx2x_set_settings,
9806         .get_drvinfo            = bnx2x_get_drvinfo,
9807         .get_wol                = bnx2x_get_wol,
9808         .set_wol                = bnx2x_set_wol,
9809         .get_msglevel           = bnx2x_get_msglevel,
9810         .set_msglevel           = bnx2x_set_msglevel,
9811         .nway_reset             = bnx2x_nway_reset,
9812         .get_link               = ethtool_op_get_link,
9813         .get_eeprom_len         = bnx2x_get_eeprom_len,
9814         .get_eeprom             = bnx2x_get_eeprom,
9815         .set_eeprom             = bnx2x_set_eeprom,
9816         .get_coalesce           = bnx2x_get_coalesce,
9817         .set_coalesce           = bnx2x_set_coalesce,
9818         .get_ringparam          = bnx2x_get_ringparam,
9819         .set_ringparam          = bnx2x_set_ringparam,
9820         .get_pauseparam         = bnx2x_get_pauseparam,
9821         .set_pauseparam         = bnx2x_set_pauseparam,
9822         .get_rx_csum            = bnx2x_get_rx_csum,
9823         .set_rx_csum            = bnx2x_set_rx_csum,
9824         .get_tx_csum            = ethtool_op_get_tx_csum,
9825         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9826         .set_flags              = bnx2x_set_flags,
9827         .get_flags              = ethtool_op_get_flags,
9828         .get_sg                 = ethtool_op_get_sg,
9829         .set_sg                 = ethtool_op_set_sg,
9830         .get_tso                = ethtool_op_get_tso,
9831         .set_tso                = bnx2x_set_tso,
9832         .self_test_count        = bnx2x_self_test_count,
9833         .self_test              = bnx2x_self_test,
9834         .get_strings            = bnx2x_get_strings,
9835         .phys_id                = bnx2x_phys_id,
9836         .get_stats_count        = bnx2x_get_stats_count,
9837         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9838 };
9839
9840 /* end of ethtool_ops */
9841
9842 /****************************************************************************
9843 * General service functions
9844 ****************************************************************************/
9845
9846 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9847 {
9848         u16 pmcsr;
9849
9850         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9851
9852         switch (state) {
9853         case PCI_D0:
9854                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9855                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9856                                        PCI_PM_CTRL_PME_STATUS));
9857
9858                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9859                         /* delay required during transition out of D3hot */
9860                         msleep(20);
9861                 break;
9862
9863         case PCI_D3hot:
9864                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
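                /* 11b is the D3hot encoding of the PCI_PM_CTRL state field */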
9865                 pmcsr |= 3;
9866
9867                 if (bp->wol)
9868                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9869
9870                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9871                                       pmcsr);
9872
9873                 /* No more memory access after this point until
9874                  * the device is brought back to D0.
9875                  */
9876                 break;
9877
9878         default:
9879                 return -EINVAL;
9880         }
9881         return 0;
9882 }
9883
9884 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9885 {
9886         u16 rx_cons_sb;
9887
9888         /* Tell compiler that status block fields can change */
9889         barrier();
9890         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9891         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9892                 rx_cons_sb++;
9893         return (fp->rx_comp_cons != rx_cons_sb);
9894 }
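/* Note on the page-boundary check above: the last entry of each RCQ
 * page is a next-page pointer rather than a real completion, so a
 * status-block consumer index that lands on it is bumped past it
 * before being compared with our local consumer.
 */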
9895
9896 /*
9897  * net_device service functions
9898  */
9899
9900 static int bnx2x_poll(struct napi_struct *napi, int budget)
9901 {
9902         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9903                                                  napi);
9904         struct bnx2x *bp = fp->bp;
9905         int work_done = 0;
9906
9907 #ifdef BNX2X_STOP_ON_ERROR
9908         if (unlikely(bp->panic))
9909                 goto poll_panic;
9910 #endif
9911
9912         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9913         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9914         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9915
9916         bnx2x_update_fpsb_idx(fp);
9917
9918         if (bnx2x_has_tx_work(fp))
9919                 bnx2x_tx_int(fp, budget);
9920
9921         if (bnx2x_has_rx_work(fp))
9922                 work_done = bnx2x_rx_int(fp, budget);
9923         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9924
9925         /* must not complete if we consumed the full budget */
9926         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9927
9928 #ifdef BNX2X_STOP_ON_ERROR
9929 poll_panic:
9930 #endif
9931                 napi_complete(napi);
9932
9933                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9934                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9935                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9936                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9937         }
9938         return work_done;
9939 }
9940
9941
9942 /* we split the first BD into a header BD and a data BD
9943  * to ease the pain of our fellow microcode engineers;
9944  * we use one DMA mapping for both BDs.
9945  * So far this has only been observed to happen
9946  * in Other Operating Systems(TM).
9947  */
9948 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9949                                    struct bnx2x_fastpath *fp,
9950                                    struct eth_tx_bd **tx_bd, u16 hlen,
9951                                    u16 bd_prod, int nbd)
9952 {
9953         struct eth_tx_bd *h_tx_bd = *tx_bd;
9954         struct eth_tx_bd *d_tx_bd;
9955         dma_addr_t mapping;
9956         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9957
9958         /* first fix first BD */
9959         h_tx_bd->nbd = cpu_to_le16(nbd);
9960         h_tx_bd->nbytes = cpu_to_le16(hlen);
9961
9962         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9963            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9964            h_tx_bd->addr_lo, h_tx_bd->nbd);
9965
9966         /* now get a new data BD
9967          * (after the pbd) and fill it */
9968         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9969         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9970
9971         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9972                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9973
9974         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9975         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9976         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9977         d_tx_bd->vlan = 0;
9978         /* this marks the BD as one that has no individual mapping;
9979          * the FW ignores this flag in a BD not marked start
9980          */
9981         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9982         DP(NETIF_MSG_TX_QUEUED,
9983            "TSO split data size is %d (%x:%x)\n",
9984            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9985
9986         /* update *tx_bd so the caller sets the last-BD flag on the data BD */
9987         *tx_bd = d_tx_bd;
9988
9989         return bd_prod;
9990 }
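/* Resulting layout for a split packet (one shared DMA mapping):
 *
 *   h_tx_bd: addr          nbytes = hlen            (headers)
 *   d_tx_bd: addr + hlen   nbytes = old_len - hlen  (data)
 */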
9991
9992 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9993 {
9994         if (fix > 0)
9995                 csum = (u16) ~csum_fold(csum_sub(csum,
9996                                 csum_partial(t_header - fix, fix, 0)));
9997
9998         else if (fix < 0)
9999                 csum = (u16) ~csum_fold(csum_add(csum,
10000                                 csum_partial(t_header, -fix, 0)));
10001
10002         return swab16(csum);
10003 }
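/* The hardware checksummed the packet starting "fix" bytes away from
 * the transport header: a positive fix means the sum began fix bytes
 * before the header (their partial sum is subtracted out), a negative
 * fix that it began -fix bytes into it (the missed bytes are added
 * back).  The result is folded, complemented and byte-swapped into
 * the form the parse BD expects.
 */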
10004
10005 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10006 {
10007         u32 rc;
10008
10009         if (skb->ip_summed != CHECKSUM_PARTIAL)
10010                 rc = XMIT_PLAIN;
10011
10012         else {
10013                 if (skb->protocol == htons(ETH_P_IPV6)) {
10014                         rc = XMIT_CSUM_V6;
10015                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10016                                 rc |= XMIT_CSUM_TCP;
10017
10018                 } else {
10019                         rc = XMIT_CSUM_V4;
10020                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10021                                 rc |= XMIT_CSUM_TCP;
10022                 }
10023         }
10024
10025         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10026                 rc |= XMIT_GSO_V4;
10027
10028         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10029                 rc |= XMIT_GSO_V6;
10030
10031         return rc;
10032 }
10033
10034 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10035 /* check if packet requires linearization (packet is too fragmented) */
10036 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10037                              u32 xmit_type)
10038 {
10039         int to_copy = 0;
10040         int hlen = 0;
10041         int first_bd_sz = 0;
10042
10043         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10044         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10045
10046                 if (xmit_type & XMIT_GSO) {
10047                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10048                         /* Check if LSO packet needs to be copied:
10049                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10050                         int wnd_size = MAX_FETCH_BD - 3;
10051                         /* Number of windows to check */
10052                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10053                         int wnd_idx = 0;
10054                         int frag_idx = 0;
10055                         u32 wnd_sum = 0;
10056
10057                         /* Headers length */
10058                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10059                                 tcp_hdrlen(skb);
10060
10061                         /* Amount of data (w/o headers) on the linear part of the skb */
10062                         first_bd_sz = skb_headlen(skb) - hlen;
10063
10064                         wnd_sum  = first_bd_sz;
10065
10066                         /* Calculate the first sum - it's special */
10067                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10068                                 wnd_sum +=
10069                                         skb_shinfo(skb)->frags[frag_idx].size;
10070
10071                         /* If there was data in the linear part - check it */
10072                         if (first_bd_sz > 0) {
10073                                 if (unlikely(wnd_sum < lso_mss)) {
10074                                         to_copy = 1;
10075                                         goto exit_lbl;
10076                                 }
10077
10078                                 wnd_sum -= first_bd_sz;
10079                         }
10080
10081                         /* Others are easier: run through the frag list and
10082                            check all windows */
10083                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10084                                 wnd_sum +=
10085                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10086
10087                                 if (unlikely(wnd_sum < lso_mss)) {
10088                                         to_copy = 1;
10089                                         break;
10090                                 }
10091                                 wnd_sum -=
10092                                         skb_shinfo(skb)->frags[wnd_idx].size;
10093                         }
10094
10095                 } else {
10096                         /* a non-LSO packet that is too fragmented must
10097                            always be linearized */
10098                         to_copy = 1;
10099                 }
10100         }
10101
10102 exit_lbl:
10103         if (unlikely(to_copy))
10104                 DP(NETIF_MSG_TX_QUEUED,
10105                    "Linearization IS REQUIRED for %s packet. "
10106                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10107                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10108                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10109
10110         return to_copy;
10111 }
10112 #endif
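/* Rationale for the sliding window above: the chip fetches at most
 * MAX_FETCH_BD BDs while building one LSO segment, so every gso_size
 * (MSS) worth of payload must fit within wnd_size consecutive BDs.
 * If any window sums to less than the MSS, the skb is linearized
 * before transmission.
 */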
10113
10114 /* called with netif_tx_lock
10115  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10116  * netif_wake_queue()
10117  */
10118 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10119 {
10120         struct bnx2x *bp = netdev_priv(dev);
10121         struct bnx2x_fastpath *fp;
10122         struct netdev_queue *txq;
10123         struct sw_tx_bd *tx_buf;
10124         struct eth_tx_bd *tx_bd;
10125         struct eth_tx_parse_bd *pbd = NULL;
10126         u16 pkt_prod, bd_prod;
10127         int nbd, fp_index;
10128         dma_addr_t mapping;
10129         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10130         int vlan_off = (bp->e1hov ? 4 : 0);
10131         int i;
10132         u8 hlen = 0;
10133
10134 #ifdef BNX2X_STOP_ON_ERROR
10135         if (unlikely(bp->panic))
10136                 return NETDEV_TX_BUSY;
10137 #endif
10138
10139         fp_index = skb_get_queue_mapping(skb);
10140         txq = netdev_get_tx_queue(dev, fp_index);
10141
10142         fp = &bp->fp[fp_index];
10143
10144         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10145                 fp->eth_q_stats.driver_xoff++;
10146                 netif_tx_stop_queue(txq);
10147                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10148                 return NETDEV_TX_BUSY;
10149         }
10150
10151         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10152            "  gso type %x  xmit_type %x\n",
10153            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10154            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10155
10156 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10157         /* First, check if we need to linearize the skb
10158            (due to FW restrictions) */
10159         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10160                 /* Statistics of linearization */
10161                 bp->lin_cnt++;
10162                 if (skb_linearize(skb) != 0) {
10163                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10164                            "silently dropping this SKB\n");
10165                         dev_kfree_skb_any(skb);
10166                         return NETDEV_TX_OK;
10167                 }
10168         }
10169 #endif
10170
10171         /*
10172         Please read carefully. First we use one BD which we mark as start,
10173         then for TSO or xsum we have a parsing info BD,
10174         and only then we have the rest of the TSO BDs.
10175         (Don't forget to mark the last one as last,
10176         and to unmap only AFTER you write to the BD ...)
10177         And above all, all pbd sizes are in words - NOT DWORDS!
10178         */
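        /* BD chain for one packet, in production order:
         *
         *   start BD -> [parse BD] -> [split data BD] -> frag BDs -> last BD
         *
         * The parse BD exists for csum/TSO packets only, and the split
         * data BD only when the headers are carved off by bnx2x_tx_split().
         */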
10179
10180         pkt_prod = fp->tx_pkt_prod++;
10181         bd_prod = TX_BD(fp->tx_bd_prod);
10182
10183         /* get a tx_buf and first BD */
10184         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10185         tx_bd = &fp->tx_desc_ring[bd_prod];
10186
10187         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10188         tx_bd->general_data = (UNICAST_ADDRESS <<
10189                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10190         /* header nbd */
10191         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10192
10193         /* remember the first BD of the packet */
10194         tx_buf->first_bd = fp->tx_bd_prod;
10195         tx_buf->skb = skb;
10196
10197         DP(NETIF_MSG_TX_QUEUED,
10198            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10199            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10200
10201 #ifdef BCM_VLAN
10202         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10203             (bp->flags & HW_VLAN_TX_FLAG)) {
10204                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10205                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10206                 vlan_off += 4;
10207         } else
10208 #endif
10209                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10210
10211         if (xmit_type) {
10212                 /* turn on parsing and get a BD */
10213                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10214                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10215
10216                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10217         }
10218
10219         if (xmit_type & XMIT_CSUM) {
10220                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10221
10222                 /* for now NS flag is not used in Linux */
10223                 pbd->global_data = (hlen |
10224                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
10225                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10226
10227                 pbd->ip_hlen = (skb_transport_header(skb) -
10228                                 skb_network_header(skb)) / 2;
10229
10230                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10231
10232                 pbd->total_hlen = cpu_to_le16(hlen);
10233                 hlen = hlen*2 - vlan_off;
10234
10235                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10236
10237                 if (xmit_type & XMIT_CSUM_V4)
10238                         tx_bd->bd_flags.as_bitfield |=
10239                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10240                 else
10241                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10242
10243                 if (xmit_type & XMIT_CSUM_TCP) {
10244                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10245
10246                 } else {
10247                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10248
10249                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10250                         pbd->cs_offset = fix / 2;
10251
10252                         DP(NETIF_MSG_TX_QUEUED,
10253                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10254                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10255                            SKB_CS(skb));
10256
10257                         /* HW bug: fixup the CSUM */
10258                         pbd->tcp_pseudo_csum =
10259                                 bnx2x_csum_fix(skb_transport_header(skb),
10260                                                SKB_CS(skb), fix);
10261
10262                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10263                            pbd->tcp_pseudo_csum);
10264                 }
10265         }
10266
10267         mapping = pci_map_single(bp->pdev, skb->data,
10268                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10269
10270         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10271         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10272         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10273         tx_bd->nbd = cpu_to_le16(nbd);
10274         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10275
10276         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10277            "  nbytes %d  flags %x  vlan %x\n",
10278            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10279            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10280            le16_to_cpu(tx_bd->vlan));
10281
10282         if (xmit_type & XMIT_GSO) {
10283
10284                 DP(NETIF_MSG_TX_QUEUED,
10285                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10286                    skb->len, hlen, skb_headlen(skb),
10287                    skb_shinfo(skb)->gso_size);
10288
10289                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10290
10291                 if (unlikely(skb_headlen(skb) > hlen))
10292                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10293                                                  bd_prod, ++nbd);
10294
10295                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10296                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10297                 pbd->tcp_flags = pbd_tcp_flags(skb);
10298
10299                 if (xmit_type & XMIT_GSO_V4) {
10300                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10301                         pbd->tcp_pseudo_csum =
10302                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10303                                                           ip_hdr(skb)->daddr,
10304                                                           0, IPPROTO_TCP, 0));
10305
10306                 } else
10307                         pbd->tcp_pseudo_csum =
10308                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10309                                                         &ipv6_hdr(skb)->daddr,
10310                                                         0, IPPROTO_TCP, 0));
10311
10312                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10313         }
10314
10315         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10316                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10317
10318                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10319                 tx_bd = &fp->tx_desc_ring[bd_prod];
10320
10321                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10322                                        frag->size, PCI_DMA_TODEVICE);
10323
10324                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10325                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10326                 tx_bd->nbytes = cpu_to_le16(frag->size);
10327                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10328                 tx_bd->bd_flags.as_bitfield = 0;
10329
10330                 DP(NETIF_MSG_TX_QUEUED,
10331                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10332                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10333                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10334         }
10335
10336         /* now at last mark the BD as the last BD */
10337         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10338
10339         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10340            tx_bd, tx_bd->bd_flags.as_bitfield);
10341
10342         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10343
10344         /* now send a Tx doorbell, counting the next-page BD
10345          * if the packet's BD chain crossed or ended on it
10346          */
10347         if (TX_BD_POFF(bd_prod) < nbd)
10348                 nbd++;
10349
10350         if (pbd)
10351                 DP(NETIF_MSG_TX_QUEUED,
10352                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10353                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10354                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10355                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10356                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10357
10358         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10359
10360         /*
10361          * Make sure that the BD data is updated before updating the producer
10362          * since FW might read the BD right after the producer is updated.
10363          * This is only applicable for weak-ordered memory model archs such
10364          * as IA-64. The following barrier is also mandatory since the FW
10365          * assumes packets always have BDs.
10366          */
10367         wmb();
10368
10369         fp->hw_tx_prods->bds_prod =
10370                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
10371         mb(); /* FW restriction: must not reorder writing nbd and packets */
10372         fp->hw_tx_prods->packets_prod =
10373                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
10374         DOORBELL(bp, FP_IDX(fp), 0);
10375
10376         mmiowb();
10377
10378         fp->tx_bd_prod += nbd;
10379         dev->trans_start = jiffies;
10380
10381         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10382                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10383                    if we put Tx into XOFF state. */
10384                 smp_mb();
10385                 netif_tx_stop_queue(txq);
10386                 fp->eth_q_stats.driver_xoff++;
10387                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10388                         netif_tx_wake_queue(txq);
10389         }
10390         fp->tx_pkt++;
10391
10392         return NETDEV_TX_OK;
10393 }
10394
10395 /* called with rtnl_lock */
10396 static int bnx2x_open(struct net_device *dev)
10397 {
10398         struct bnx2x *bp = netdev_priv(dev);
10399
10400         netif_carrier_off(dev);
10401
10402         bnx2x_set_power_state(bp, PCI_D0);
10403
10404         return bnx2x_nic_load(bp, LOAD_OPEN);
10405 }
10406
10407 /* called with rtnl_lock */
10408 static int bnx2x_close(struct net_device *dev)
10409 {
10410         struct bnx2x *bp = netdev_priv(dev);
10411
10412         /* Unload the driver, release IRQs */
10413         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10414         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10415                 if (!CHIP_REV_IS_SLOW(bp))
10416                         bnx2x_set_power_state(bp, PCI_D3hot);
10417
10418         return 0;
10419 }
10420
10421 /* called with netif_tx_lock from set_multicast */
10422 static void bnx2x_set_rx_mode(struct net_device *dev)
10423 {
10424         struct bnx2x *bp = netdev_priv(dev);
10425         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10426         int port = BP_PORT(bp);
10427
10428         if (bp->state != BNX2X_STATE_OPEN) {
10429                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10430                 return;
10431         }
10432
10433         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10434
10435         if (dev->flags & IFF_PROMISC)
10436                 rx_mode = BNX2X_RX_MODE_PROMISC;
10437
10438         else if ((dev->flags & IFF_ALLMULTI) ||
10439                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10440                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10441
10442         else { /* some multicasts */
10443                 if (CHIP_IS_E1(bp)) {
10444                         int i, old, offset;
10445                         struct dev_mc_list *mclist;
10446                         struct mac_configuration_cmd *config =
10447                                                 bnx2x_sp(bp, mcast_config);
10448
10449                         for (i = 0, mclist = dev->mc_list;
10450                              mclist && (i < dev->mc_count);
10451                              i++, mclist = mclist->next) {
10452
10453                                 config->config_table[i].
10454                                         cam_entry.msb_mac_addr =
10455                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10456                                 config->config_table[i].
10457                                         cam_entry.middle_mac_addr =
10458                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10459                                 config->config_table[i].
10460                                         cam_entry.lsb_mac_addr =
10461                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10462                                 config->config_table[i].cam_entry.flags =
10463                                                         cpu_to_le16(port);
10464                                 config->config_table[i].
10465                                         target_table_entry.flags = 0;
10466                                 config->config_table[i].
10467                                         target_table_entry.client_id = 0;
10468                                 config->config_table[i].
10469                                         target_table_entry.vlan_id = 0;
10470
10471                                 DP(NETIF_MSG_IFUP,
10472                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10473                                    config->config_table[i].
10474                                                 cam_entry.msb_mac_addr,
10475                                    config->config_table[i].
10476                                                 cam_entry.middle_mac_addr,
10477                                    config->config_table[i].
10478                                                 cam_entry.lsb_mac_addr);
10479                         }
10480                         old = config->hdr.length;
10481                         if (old > i) {
10482                                 for (; i < old; i++) {
10483                                         if (CAM_IS_INVALID(config->
10484                                                            config_table[i])) {
10485                                                 /* already invalidated */
10486                                                 break;
10487                                         }
10488                                         /* invalidate */
10489                                         CAM_INVALIDATE(config->
10490                                                        config_table[i]);
10491                                 }
10492                         }
10493
10494                         if (CHIP_REV_IS_SLOW(bp))
10495                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10496                         else
10497                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10498
10499                         config->hdr.length = i;
10500                         config->hdr.offset = offset;
10501                         config->hdr.client_id = bp->fp->cl_id;
10502                         config->hdr.reserved1 = 0;
10503
10504                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10505                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10506                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10507                                       0);
10508                 } else { /* E1H */
10509                         /* Accept one or more multicasts */
10510                         struct dev_mc_list *mclist;
10511                         u32 mc_filter[MC_HASH_SIZE];
10512                         u32 crc, bit, regidx;
10513                         int i;
10514
10515                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10516
10517                         for (i = 0, mclist = dev->mc_list;
10518                              mclist && (i < dev->mc_count);
10519                              i++, mclist = mclist->next) {
10520
10521                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10522                                    mclist->dmi_addr);
10523
10524                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10525                                 bit = (crc >> 24) & 0xff;
10526                                 regidx = bit >> 5;
10527                                 bit &= 0x1f;
10528                                 mc_filter[regidx] |= (1 << bit);
10529                         }
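                        /* the top byte of the crc32c selects one of 256
                         * filter bits: bits 7:5 pick the MC_HASH register,
                         * bits 4:0 the bit within it
                         */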
10530
10531                         for (i = 0; i < MC_HASH_SIZE; i++)
10532                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10533                                        mc_filter[i]);
10534                 }
10535         }
10536
10537         bp->rx_mode = rx_mode;
10538         bnx2x_set_storm_rx_mode(bp);
10539 }
10540
10541 /* called with rtnl_lock */
10542 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10543 {
10544         struct sockaddr *addr = p;
10545         struct bnx2x *bp = netdev_priv(dev);
10546
10547         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10548                 return -EINVAL;
10549
10550         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10551         if (netif_running(dev)) {
10552                 if (CHIP_IS_E1(bp))
10553                         bnx2x_set_mac_addr_e1(bp, 1);
10554                 else
10555                         bnx2x_set_mac_addr_e1h(bp, 1);
10556         }
10557
10558         return 0;
10559 }
10560
10561 /* called with rtnl_lock */
10562 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10563 {
10564         struct mii_ioctl_data *data = if_mii(ifr);
10565         struct bnx2x *bp = netdev_priv(dev);
10566         int port = BP_PORT(bp);
10567         int err;
10568
10569         switch (cmd) {
10570         case SIOCGMIIPHY:
10571                 data->phy_id = bp->port.phy_addr;
10572
10573                 /* fallthrough */
10574
10575         case SIOCGMIIREG: {
10576                 u16 mii_regval;
10577
10578                 if (!netif_running(dev))
10579                         return -EAGAIN;
10580
10581                 mutex_lock(&bp->port.phy_mutex);
10582                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10583                                       DEFAULT_PHY_DEV_ADDR,
10584                                       (data->reg_num & 0x1f), &mii_regval);
10585                 data->val_out = mii_regval;
10586                 mutex_unlock(&bp->port.phy_mutex);
10587                 return err;
10588         }
10589
10590         case SIOCSMIIREG:
10591                 if (!capable(CAP_NET_ADMIN))
10592                         return -EPERM;
10593
10594                 if (!netif_running(dev))
10595                         return -EAGAIN;
10596
10597                 mutex_lock(&bp->port.phy_mutex);
10598                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10599                                        DEFAULT_PHY_DEV_ADDR,
10600                                        (data->reg_num & 0x1f), data->val_in);
10601                 mutex_unlock(&bp->port.phy_mutex);
10602                 return err;
10603
10604         default:
10605                 /* do nothing */
10606                 break;
10607         }
10608
10609         return -EOPNOTSUPP;
10610 }
10611
10612 /* called with rtnl_lock */
10613 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10614 {
10615         struct bnx2x *bp = netdev_priv(dev);
10616         int rc = 0;
10617
10618         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10619             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10620                 return -EINVAL;
10621
10622         /* This does not race with packet allocation
10623          * because the actual alloc size is
10624          * only updated as part of load
10625          */
10626         dev->mtu = new_mtu;
10627
10628         if (netif_running(dev)) {
10629                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10630                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10631         }
10632
10633         return rc;
10634 }
10635
10636 static void bnx2x_tx_timeout(struct net_device *dev)
10637 {
10638         struct bnx2x *bp = netdev_priv(dev);
10639
10640 #ifdef BNX2X_STOP_ON_ERROR
10641         if (!bp->panic)
10642                 bnx2x_panic();
10643 #endif
10644         /* This allows the netif to be shutdown gracefully before resetting */
10645         schedule_work(&bp->reset_task);
10646 }
10647
10648 #ifdef BCM_VLAN
10649 /* called with rtnl_lock */
10650 static void bnx2x_vlan_rx_register(struct net_device *dev,
10651                                    struct vlan_group *vlgrp)
10652 {
10653         struct bnx2x *bp = netdev_priv(dev);
10654
10655         bp->vlgrp = vlgrp;
10656
10657         /* Set flags according to the required capabilities */
10658         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10659
10660         if (dev->features & NETIF_F_HW_VLAN_TX)
10661                 bp->flags |= HW_VLAN_TX_FLAG;
10662
10663         if (dev->features & NETIF_F_HW_VLAN_RX)
10664                 bp->flags |= HW_VLAN_RX_FLAG;
10665
10666         if (netif_running(dev))
10667                 bnx2x_set_client_config(bp);
10668 }
10669
10670 #endif
10671
10672 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10673 static void poll_bnx2x(struct net_device *dev)
10674 {
10675         struct bnx2x *bp = netdev_priv(dev);
10676
10677         disable_irq(bp->pdev->irq);
10678         bnx2x_interrupt(bp->pdev->irq, dev);
10679         enable_irq(bp->pdev->irq);
10680 }
10681 #endif
10682
10683 static const struct net_device_ops bnx2x_netdev_ops = {
10684         .ndo_open               = bnx2x_open,
10685         .ndo_stop               = bnx2x_close,
10686         .ndo_start_xmit         = bnx2x_start_xmit,
10687         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10688         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10689         .ndo_validate_addr      = eth_validate_addr,
10690         .ndo_do_ioctl           = bnx2x_ioctl,
10691         .ndo_change_mtu         = bnx2x_change_mtu,
10692         .ndo_tx_timeout         = bnx2x_tx_timeout,
10693 #ifdef BCM_VLAN
10694         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10695 #endif
10696 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10697         .ndo_poll_controller    = poll_bnx2x,
10698 #endif
10699 };
10700
10701
10702 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10703                                     struct net_device *dev)
10704 {
10705         struct bnx2x *bp;
10706         int rc;
10707
10708         SET_NETDEV_DEV(dev, &pdev->dev);
10709         bp = netdev_priv(dev);
10710
10711         bp->dev = dev;
10712         bp->pdev = pdev;
10713         bp->flags = 0;
10714         bp->func = PCI_FUNC(pdev->devfn);
10715
10716         rc = pci_enable_device(pdev);
10717         if (rc) {
10718                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10719                 goto err_out;
10720         }
10721
10722         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10723                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10724                        " aborting\n");
10725                 rc = -ENODEV;
10726                 goto err_out_disable;
10727         }
10728
10729         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10730                 printk(KERN_ERR PFX "Cannot find second PCI device"
10731                        " base address, aborting\n");
10732                 rc = -ENODEV;
10733                 goto err_out_disable;
10734         }
10735
10736         if (atomic_read(&pdev->enable_cnt) == 1) {
10737                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10738                 if (rc) {
10739                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10740                                " aborting\n");
10741                         goto err_out_disable;
10742                 }
10743
10744                 pci_set_master(pdev);
10745                 pci_save_state(pdev);
10746         }
10747
10748         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10749         if (bp->pm_cap == 0) {
10750                 printk(KERN_ERR PFX "Cannot find power management"
10751                        " capability, aborting\n");
10752                 rc = -EIO;
10753                 goto err_out_release;
10754         }
10755
10756         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10757         if (bp->pcie_cap == 0) {
10758                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10759                        " aborting\n");
10760                 rc = -EIO;
10761                 goto err_out_release;
10762         }
10763
10764         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10765                 bp->flags |= USING_DAC_FLAG;
10766                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10767                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10768                                " failed, aborting\n");
10769                         rc = -EIO;
10770                         goto err_out_release;
10771                 }
10772
10773         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10774                 printk(KERN_ERR PFX "System does not support DMA,"
10775                        " aborting\n");
10776                 rc = -EIO;
10777                 goto err_out_release;
10778         }
10779
10780         dev->mem_start = pci_resource_start(pdev, 0);
10781         dev->base_addr = dev->mem_start;
10782         dev->mem_end = pci_resource_end(pdev, 0);
10783
10784         dev->irq = pdev->irq;
10785
10786         bp->regview = pci_ioremap_bar(pdev, 0);
10787         if (!bp->regview) {
10788                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10789                 rc = -ENOMEM;
10790                 goto err_out_release;
10791         }
10792
10793         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10794                                         min_t(u64, BNX2X_DB_SIZE,
10795                                               pci_resource_len(pdev, 2)));
10796         if (!bp->doorbells) {
10797                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10798                 rc = -ENOMEM;
10799                 goto err_out_unmap;
10800         }
10801
10802         bnx2x_set_power_state(bp, PCI_D0);
10803
10804         /* clean indirect addresses */
10805         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10806                                PCICFG_VENDOR_ID_OFFSET);
10807         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10808         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10809         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10810         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10811
10812         dev->watchdog_timeo = TX_TIMEOUT;
10813
10814         dev->netdev_ops = &bnx2x_netdev_ops;
10815         dev->ethtool_ops = &bnx2x_ethtool_ops;
10816         dev->features |= NETIF_F_SG;
10817         dev->features |= NETIF_F_HW_CSUM;
10818         if (bp->flags & USING_DAC_FLAG)
10819                 dev->features |= NETIF_F_HIGHDMA;
10820 #ifdef BCM_VLAN
10821         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10822         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10823 #endif
10824         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10825         dev->features |= NETIF_F_TSO6;
10826
10827         return 0;
10828
10829 err_out_unmap:
10830         if (bp->regview) {
10831                 iounmap(bp->regview);
10832                 bp->regview = NULL;
10833         }
10834         if (bp->doorbells) {
10835                 iounmap(bp->doorbells);
10836                 bp->doorbells = NULL;
10837         }
10838
10839 err_out_release:
10840         if (atomic_read(&pdev->enable_cnt) == 1)
10841                 pci_release_regions(pdev);
10842
10843 err_out_disable:
10844         pci_disable_device(pdev);
10845         pci_set_drvdata(pdev, NULL);
10846
10847 err_out:
10848         return rc;
10849 }
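
/*
 * bnx2x_init_dev() follows the usual kernel "goto unwind" idiom: each
 * failure jumps to a label that releases everything acquired so far,
 * in reverse order.  A minimal sketch of the pattern (names are
 * placeholders, not driver symbols):
 */
#if 0
static int probe_sketch(void)
{
        int rc;

        rc = acquire_a();
        if (rc)
                goto err_out;

        rc = acquire_b();
        if (rc)
                goto err_release_a;     /* undo only what succeeded */

        return 0;

err_release_a:
        release_a();
err_out:
        return rc;
}
#endif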
10850
10851 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10852 {
10853         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10854
10855         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10856         return val;
10857 }
10858
10859 /* returns 1 for 2.5GHz, 2 for 5GHz */
10860 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10861 {
10862         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10863
10864         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10865         return val;
10866 }
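
/*
 * The two helpers above mask-and-shift a GRC-mapped copy of the PCIe
 * Link Status register.  A sketch (hypothetical helper) of the same
 * decode through ordinary config space, using the standard register
 * layout (bits 3:0 = link speed, 1 = 2.5GT/s, 2 = 5GT/s; bits 9:4 =
 * negotiated width); bp->pcie_cap was located in bnx2x_init_dev():
 */
#if 0
static void bnx2x_pcie_link_sketch(struct bnx2x *bp)
{
        u16 lnksta;

        pci_read_config_word(bp->pdev, bp->pcie_cap + PCI_EXP_LNKSTA,
                             &lnksta);
        printk(KERN_INFO PFX "PCI-E x%d, speed code %d\n",
               (lnksta >> 4) & 0x3f, lnksta & 0xf);
}
#endif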
10867
10868 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10869                                     const struct pci_device_id *ent)
10870 {
10871         static int version_printed;
10872         struct net_device *dev = NULL;
10873         struct bnx2x *bp;
10874         int rc;
10875
10876         if (version_printed++ == 0)
10877                 printk(KERN_INFO "%s", version);
10878
10879         /* dev zeroed in alloc_etherdev_mq() */
10880         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
10881         if (!dev) {
10882                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10883                 return -ENOMEM;
10884         }
10885
10886         bp = netdev_priv(dev);
10887         bp->msglevel = debug;
10888
10889         rc = bnx2x_init_dev(pdev, dev);
10890         if (rc < 0) {
10891                 free_netdev(dev);
10892                 return rc;
10893         }
10894
10895         pci_set_drvdata(pdev, dev);
10896
10897         rc = bnx2x_init_bp(bp);
10898         if (rc)
10899                 goto init_one_exit;
10900
10901         rc = register_netdev(dev);
10902         if (rc) {
10903                 dev_err(&pdev->dev, "Cannot register net device\n");
10904                 goto init_one_exit;
10905         }
10906
10907         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10908                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
10909                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10910                bnx2x_get_pcie_width(bp),
10911                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10912                dev->base_addr, bp->pdev->irq);
10913         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10914         return 0;
10915
10916 init_one_exit:
10917         if (bp->regview)
10918                 iounmap(bp->regview);
10919
10920         if (bp->doorbells)
10921                 iounmap(bp->doorbells);
10922
10923         free_netdev(dev);
10924
10925         if (atomic_read(&pdev->enable_cnt) == 1)
10926                 pci_release_regions(pdev);
10927
10928         pci_disable_device(pdev);
10929         pci_set_drvdata(pdev, NULL);
10930
10931         return rc;
10932 }
10933
10934 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10935 {
10936         struct net_device *dev = pci_get_drvdata(pdev);
10937         struct bnx2x *bp;
10938
10939         if (!dev) {
10940                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10941                 return;
10942         }
10943         bp = netdev_priv(dev);
10944
10945         unregister_netdev(dev);
10946
10947         if (bp->regview)
10948                 iounmap(bp->regview);
10949
10950         if (bp->doorbells)
10951                 iounmap(bp->doorbells);
10952
10953         free_netdev(dev);
10954
10955         if (atomic_read(&pdev->enable_cnt) == 1)
10956                 pci_release_regions(pdev);
10957
10958         pci_disable_device(pdev);
10959         pci_set_drvdata(pdev, NULL);
10960 }
10961
10962 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10963 {
10964         struct net_device *dev = pci_get_drvdata(pdev);
10965         struct bnx2x *bp;
10966
10967         if (!dev) {
10968                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10969                 return -ENODEV;
10970         }
10971         bp = netdev_priv(dev);
10972
10973         rtnl_lock();
10974
10975         pci_save_state(pdev);
10976
10977         if (!netif_running(dev)) {
10978                 rtnl_unlock();
10979                 return 0;
10980         }
10981
10982         netif_device_detach(dev);
10983
10984         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10985
10986         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10987
10988         rtnl_unlock();
10989
10990         return 0;
10991 }
10992
10993 static int bnx2x_resume(struct pci_dev *pdev)
10994 {
10995         struct net_device *dev = pci_get_drvdata(pdev);
10996         struct bnx2x *bp;
10997         int rc;
10998
10999         if (!dev) {
11000                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11001                 return -ENODEV;
11002         }
11003         bp = netdev_priv(dev);
11004
11005         rtnl_lock();
11006
11007         pci_restore_state(pdev);
11008
11009         if (!netif_running(dev)) {
11010                 rtnl_unlock();
11011                 return 0;
11012         }
11013
11014         bnx2x_set_power_state(bp, PCI_D0);
11015         netif_device_attach(dev);
11016
11017         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11018
11019         rtnl_unlock();
11020
11021         return rc;
11022 }
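
/*
 * The PCI core drives the two callbacks above around system sleep:
 * suspend saves config space, detaches the netif, unloads the NIC with
 * UNLOAD_CLOSE and drops to the D-state chosen by pci_choose_state();
 * resume restores config space, returns to D0 and reloads with
 * LOAD_OPEN.  Both take rtnl_lock because they reuse the normal
 * load/unload paths.
 */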
11023
11024 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11025 {
11026         int i;
11027
11028         bp->state = BNX2X_STATE_ERROR;
11029
11030         bp->rx_mode = BNX2X_RX_MODE_NONE;
11031
11032         bnx2x_netif_stop(bp, 0);
11033
11034         del_timer_sync(&bp->timer);
11035         bp->stats_state = STATS_STATE_DISABLED;
11036         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11037
11038         /* Release IRQs */
11039         bnx2x_free_irq(bp);
11040
11041         if (CHIP_IS_E1(bp)) {
11042                 struct mac_configuration_cmd *config =
11043                                                 bnx2x_sp(bp, mcast_config);
11044
11045                 for (i = 0; i < config->hdr.length; i++)
11046                         CAM_INVALIDATE(config->config_table[i]);
11047         }
11048
11049         /* Free SKBs, SGEs, TPA pool and driver internals */
11050         bnx2x_free_skbs(bp);
11051         for_each_rx_queue(bp, i)
11052                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11053         for_each_rx_queue(bp, i)
11054                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11055         bnx2x_free_mem(bp);
11056
11057         bp->state = BNX2X_STATE_CLOSED;
11058
11059         netif_carrier_off(bp->dev);
11060
11061         return 0;
11062 }
11063
11064 static void bnx2x_eeh_recover(struct bnx2x *bp)
11065 {
11066         u32 val;
11067
11068         mutex_init(&bp->port.phy_mutex);
11069
11070         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11071         bp->link_params.shmem_base = bp->common.shmem_base;
11072         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11073
11074         if (!bp->common.shmem_base ||
11075             (bp->common.shmem_base < 0xA0000) ||
11076             (bp->common.shmem_base >= 0xC0000)) {
11077                 BNX2X_DEV_INFO("MCP not active\n");
11078                 bp->flags |= NO_MCP_FLAG;
11079                 return;
11080         }
11081
11082         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11083         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11084                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11085                 BNX2X_ERR("BAD MCP validity signature\n");
11086
11087         if (!BP_NOMCP(bp)) {
11088                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11089                               & DRV_MSG_SEQ_NUMBER_MASK);
11090                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11091         }
11092 }
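
/*
 * After a slot reset the bootcode state is unknown, so the function
 * above re-reads the shared-memory base, re-validates the MCP
 * signature and re-syncs the driver/firmware mailbox sequence before
 * bnx2x_nic_load() talks to the MCP again.
 */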
11093
11094 /**
11095  * bnx2x_io_error_detected - called when PCI error is detected
11096  * @pdev: Pointer to PCI device
11097  * @state: The current pci connection state
11098  *
11099  * This function is called after a PCI bus error affecting
11100  * this device has been detected.
11101  */
11102 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11103                                                 pci_channel_state_t state)
11104 {
11105         struct net_device *dev = pci_get_drvdata(pdev);
11106         struct bnx2x *bp = netdev_priv(dev);
11107
11108         rtnl_lock();
11109
11110         netif_device_detach(dev);
11111
11112         if (netif_running(dev))
11113                 bnx2x_eeh_nic_unload(bp);
11114
11115         pci_disable_device(pdev);
11116
11117         rtnl_unlock();
11118
11119         /* Request a slot reset */
11120         return PCI_ERS_RESULT_NEED_RESET;
11121 }
11122
11123 /**
11124  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11125  * @pdev: Pointer to PCI device
11126  *
11127  * Restart the card from scratch, as if from a cold boot.
11128  */
11129 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11130 {
11131         struct net_device *dev = pci_get_drvdata(pdev);
11132         struct bnx2x *bp = netdev_priv(dev);
11133
11134         rtnl_lock();
11135
11136         if (pci_enable_device(pdev)) {
11137                 dev_err(&pdev->dev,
11138                         "Cannot re-enable PCI device after reset\n");
11139                 rtnl_unlock();
11140                 return PCI_ERS_RESULT_DISCONNECT;
11141         }
11142
11143         pci_set_master(pdev);
11144         pci_restore_state(pdev);
11145
11146         if (netif_running(dev))
11147                 bnx2x_set_power_state(bp, PCI_D0);
11148
11149         rtnl_unlock();
11150
11151         return PCI_ERS_RESULT_RECOVERED;
11152 }
11153
11154 /**
11155  * bnx2x_io_resume - called when traffic can start flowing again
11156  * @pdev: Pointer to PCI device
11157  *
11158  * This callback is called when the error recovery driver tells us that
11159  * This callback is called when the error recovery driver tells us that
11160  * it's OK to resume normal operation.
11161 static void bnx2x_io_resume(struct pci_dev *pdev)
11162 {
11163         struct net_device *dev = pci_get_drvdata(pdev);
11164         struct bnx2x *bp = netdev_priv(dev);
11165
11166         rtnl_lock();
11167
11168         bnx2x_eeh_recover(bp);
11169
11170         if (netif_running(dev))
11171                 bnx2x_nic_load(bp, LOAD_NORMAL);
11172
11173         netif_device_attach(dev);
11174
11175         rtnl_unlock();
11176 }
11177
11178 static struct pci_error_handlers bnx2x_err_handler = {
11179         .error_detected = bnx2x_io_error_detected,
11180         .slot_reset = bnx2x_io_slot_reset,
11181         .resume = bnx2x_io_resume,
11182 };
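
/*
 * Recovery sequence driven by the PCI error-recovery core (see
 * Documentation/PCI/pci-error-recovery.txt): .error_detected() runs
 * first and asks for a reset via PCI_ERS_RESULT_NEED_RESET; the core
 * then resets the slot and calls .slot_reset(); if that returns
 * PCI_ERS_RESULT_RECOVERED, .resume() restarts traffic through
 * bnx2x_nic_load().
 */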
11183
11184 static struct pci_driver bnx2x_pci_driver = {
11185         .name        = DRV_MODULE_NAME,
11186         .id_table    = bnx2x_pci_tbl,
11187         .probe       = bnx2x_init_one,
11188         .remove      = __devexit_p(bnx2x_remove_one),
11189         .suspend     = bnx2x_suspend,
11190         .resume      = bnx2x_resume,
11191         .err_handler = &bnx2x_err_handler,
11192 };
11193
11194 static int __init bnx2x_init(void)
11195 {
11196         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11197         if (bnx2x_wq == NULL) {
11198                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11199                 return -ENOMEM;
11200         }
11201
11202         return pci_register_driver(&bnx2x_pci_driver);
11203 }
11204
11205 static void __exit bnx2x_cleanup(void)
11206 {
11207         pci_unregister_driver(&bnx2x_pci_driver);
11208
11209         destroy_workqueue(bnx2x_wq);
11210 }
11211
11212 module_init(bnx2x_init);
11213 module_exit(bnx2x_cleanup);
11214