/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.23"
#define DRV_MODULE_RELDATE      "2008/11/03"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

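/* used only at init; indirect read through the PCI config window,
 * the counterpart of bnx2x_reg_wr_ind() above
 */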
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

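/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr,
 * falling back to indirect register writes while DMAE is not yet ready;
 * completion is polled via the slowpath write-back word (wb_comp) under
 * dmae_mutex
 */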
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

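/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer; same indirect fallback and completion polling as
 * bnx2x_write_dmae() above
 */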
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

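/* scan the assert lists of the four storm processors (X/T/C/U) and print
 * every valid entry; returns the number of asserts found
 */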
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

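/* print the firmware trace held in the MCP scratchpad; the buffer is
 * treated as circular: dump from the current mark to the end of the
 * buffer, then wrap around from its start back to the mark
 */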
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

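/* post-mortem state dump: per-queue producer/consumer indices, the rings
 * around the current consumers, the default status block indices, the
 * firmware trace and the storm assert lists
 */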
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

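/* enable host coalescing interrupts for this port: MSI/MSI-X plus
 * attention bits when MSI-X is used, single-ISR INTA mode otherwise;
 * on E1H also program the leading/trailing edge attention registers
 */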
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

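/* reclaim completed Tx packets up to the consumer index reported in the
 * status block (at most 'work' packets) and wake the queue if it was
 * stopped and enough BDs have become available again
 */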
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


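/* handle a ramrod completion CQE: advance the per-fastpath or global
 * driver state machine according to which slowpath command completed
 */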
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

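/* advance the SGE producer past the pages consumed by this CQE; the
 * sge_mask bitmap ensures the producer only moves over fully completed
 * 64-bit mask elements
 */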
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

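/* begin a TPA aggregation: park the consumer buffer in the per-queue TPA
 * pool (bin) and map a spare skb from the pool into the producer slot
 */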
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

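/* attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments, replenishing each consumed ring slot with a newly
 * allocated page
 */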
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

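/* complete a TPA aggregation: fix up the IP checksum, attach the SGE
 * fragments and pass the packet to the stack; if a replacement skb cannot
 * be allocated, the packet is dropped and the buffer stays in the bin
 */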
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

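/* NAPI receive processing: walk the completion queue for up to 'budget'
 * packets, dispatching slowpath CQEs, TPA start/stop events and regular
 * receive packets
 */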
1368 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1369 {
1370         struct bnx2x *bp = fp->bp;
1371         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1372         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1373         int rx_pkt = 0;
1374
1375 #ifdef BNX2X_STOP_ON_ERROR
1376         if (unlikely(bp->panic))
1377                 return 0;
1378 #endif
1379
1380         /* CQ "next element" is of the size of the regular element,
1381            that's why it's ok here */
1382         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1383         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1384                 hw_comp_cons++;
1385
1386         bd_cons = fp->rx_bd_cons;
1387         bd_prod = fp->rx_bd_prod;
1388         bd_prod_fw = bd_prod;
1389         sw_comp_cons = fp->rx_comp_cons;
1390         sw_comp_prod = fp->rx_comp_prod;
1391
1392         /* Memory barrier necessary as speculative reads of the rx
1393          * buffer can be ahead of the index in the status block
1394          */
1395         rmb();
1396
1397         DP(NETIF_MSG_RX_STATUS,
1398            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1399            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1400
1401         while (sw_comp_cons != hw_comp_cons) {
1402                 struct sw_rx_bd *rx_buf = NULL;
1403                 struct sk_buff *skb;
1404                 union eth_rx_cqe *cqe;
1405                 u8 cqe_fp_flags;
1406                 u16 len, pad;
1407
1408                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1409                 bd_prod = RX_BD(bd_prod);
1410                 bd_cons = RX_BD(bd_cons);
1411
1412                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1413                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1414
1415                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1416                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1417                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1418                    cqe->fast_path_cqe.rss_hash_result,
1419                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1420                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1421
1422                 /* is this a slowpath msg? */
1423                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1424                         bnx2x_sp_event(fp, cqe);
1425                         goto next_cqe;
1426
1427                 /* this is an rx packet */
1428                 } else {
1429                         rx_buf = &fp->rx_buf_ring[bd_cons];
1430                         skb = rx_buf->skb;
1431                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1432                         pad = cqe->fast_path_cqe.placement_offset;
1433
1434                         /* If CQE is marked both TPA_START and TPA_END
1435                            it is a non-TPA CQE */
1436                         if ((!fp->disable_tpa) &&
1437                             (TPA_TYPE(cqe_fp_flags) !=
1438                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1439                                 u16 queue = cqe->fast_path_cqe.queue_index;
1440
1441                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1442                                         DP(NETIF_MSG_RX_STATUS,
1443                                            "calling tpa_start on queue %d\n",
1444                                            queue);
1445
1446                                         bnx2x_tpa_start(fp, queue, skb,
1447                                                         bd_cons, bd_prod);
1448                                         goto next_rx;
1449                                 }
1450
1451                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1452                                         DP(NETIF_MSG_RX_STATUS,
1453                                            "calling tpa_stop on queue %d\n",
1454                                            queue);
1455
1456                                         if (!BNX2X_RX_SUM_FIX(cqe))
1457                                                 BNX2X_ERR("STOP on non-TCP "
1458                                                           "data\n");
1459
1460                                         /* This is the size of the linear data
1461                                            on this skb */
1462                                         len = le16_to_cpu(cqe->fast_path_cqe.
1463                                                                 len_on_bd);
1464                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1465                                                     len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1467                                         if (bp->panic)
1468                                                 return -EINVAL;
1469 #endif
1470
1471                                         bnx2x_update_sge_prod(fp,
1472                                                         &cqe->fast_path_cqe);
1473                                         goto next_cqe;
1474                                 }
1475                         }
1476
1477                         pci_dma_sync_single_for_device(bp->pdev,
1478                                         pci_unmap_addr(rx_buf, mapping),
1479                                                        pad + RX_COPY_THRESH,
1480                                                        PCI_DMA_FROMDEVICE);
1481                         prefetch(skb);
1482                         prefetch(((char *)(skb)) + 128);
1483
1484                         /* is this an error packet? */
1485                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486                                 DP(NETIF_MSG_RX_ERR,
1487                                    "ERROR  flags %x  rx packet %u\n",
1488                                    cqe_fp_flags, sw_comp_cons);
1489                                 bp->eth_stats.rx_err_discard_pkt++;
1490                                 goto reuse_rx;
1491                         }
1492
1493                         /* Since we don't have a jumbo ring,
1494                          * copy small packets if mtu > 1500
1495                          */
1496                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497                             (len <= RX_COPY_THRESH)) {
1498                                 struct sk_buff *new_skb;
1499
1500                                 new_skb = netdev_alloc_skb(bp->dev,
1501                                                            len + pad);
1502                                 if (new_skb == NULL) {
1503                                         DP(NETIF_MSG_RX_ERR,
1504                                            "ERROR  packet dropped "
1505                                            "because of alloc failure\n");
1506                                         bp->eth_stats.rx_skb_alloc_failed++;
1507                                         goto reuse_rx;
1508                                 }
1509
1510                                 /* aligned copy */
1511                                 skb_copy_from_linear_data_offset(skb, pad,
1512                                                     new_skb->data + pad, len);
1513                                 skb_reserve(new_skb, pad);
1514                                 skb_put(new_skb, len);
1515
1516                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1517
1518                                 skb = new_skb;
1519
1520                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521                                 pci_unmap_single(bp->pdev,
1522                                         pci_unmap_addr(rx_buf, mapping),
1523                                                  bp->rx_buf_size,
1524                                                  PCI_DMA_FROMDEVICE);
1525                                 skb_reserve(skb, pad);
1526                                 skb_put(skb, len);
1527
1528                         } else {
1529                                 DP(NETIF_MSG_RX_ERR,
1530                                    "ERROR  packet dropped because "
1531                                    "of alloc failure\n");
1532                                 bp->eth_stats.rx_skb_alloc_failed++;
1533 reuse_rx:
1534                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535                                 goto next_rx;
1536                         }
1537
1538                         skb->protocol = eth_type_trans(skb, bp->dev);
1539
1540                         skb->ip_summed = CHECKSUM_NONE;
1541                         if (bp->rx_csum) {
1542                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1544                                 else
1545                                         bp->eth_stats.hw_csum_err++;
1546                         }
1547                 }
1548
1549 #ifdef BCM_VLAN
1550                 if ((bp->vlgrp != NULL) &&
1551                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552                      PARSING_FLAGS_VLAN))
1553                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555                 else
1556 #endif
1557                         netif_receive_skb(skb);
1558
1559
1560 next_rx:
1561                 rx_buf->skb = NULL;
1562
1563                 bd_cons = NEXT_RX_IDX(bd_cons);
1564                 bd_prod = NEXT_RX_IDX(bd_prod);
1565                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1566                 rx_pkt++;
1567 next_cqe:
1568                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1569                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1570
1571                 if (rx_pkt == budget)
1572                         break;
1573         } /* while */
1574
1575         fp->rx_bd_cons = bd_cons;
1576         fp->rx_bd_prod = bd_prod_fw;
1577         fp->rx_comp_cons = sw_comp_cons;
1578         fp->rx_comp_prod = sw_comp_prod;
1579
1580         /* Update producers */
1581         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1582                              fp->rx_sge_prod);
1583         mmiowb(); /* keep prod updates ordered */
1584
1585         fp->rx_pkt += rx_pkt;
1586         fp->rx_calls++;
1587
1588         return rx_pkt;
1589 }
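/* The loop above is the usual NAPI producer/consumer ring walk.  A
 * minimal sketch of the pattern, using hypothetical ring helpers that
 * are not part of this driver, would look like:
 *
 *      static int ring_poll(struct ring *r, int budget)
 *      {
 *              int done = 0;
 *              u16 hw_cons = le16_to_cpu(*r->hw_cons_sb);
 *
 *              rmb();          // index read must not pass data reads
 *              while (r->sw_cons != hw_cons && done < budget) {
 *                      consume(r, r->sw_cons);   // pass skb up the stack
 *                      refill(r, r->sw_prod);    // post a fresh buffer
 *                      r->sw_cons = NEXT_IDX(r->sw_cons);
 *                      r->sw_prod = NEXT_IDX(r->sw_prod);
 *                      done++;
 *              }
 *              update_prod(r);   // doorbell: tell the chip about sw_prod
 *              return done;
 *      }
 */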
1590
1591 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1592 {
1593         struct bnx2x_fastpath *fp = fp_cookie;
1594         struct bnx2x *bp = fp->bp;
1595         struct net_device *dev = bp->dev;
1596         int index = FP_IDX(fp);
1597
1598         /* Return here if interrupt is disabled */
1599         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1600                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1601                 return IRQ_HANDLED;
1602         }
1603
1604         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1605            index, FP_SB_ID(fp));
1606         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1607
1608 #ifdef BNX2X_STOP_ON_ERROR
1609         if (unlikely(bp->panic))
1610                 return IRQ_HANDLED;
1611 #endif
1612
1613         prefetch(fp->rx_cons_sb);
1614         prefetch(fp->tx_cons_sb);
1615         prefetch(&fp->status_blk->c_status_block.status_block_index);
1616         prefetch(&fp->status_blk->u_status_block.status_block_index);
1617
1618         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1619
1620         return IRQ_HANDLED;
1621 }
1622
1623 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1624 {
1625         struct net_device *dev = dev_instance;
1626         struct bnx2x *bp = netdev_priv(dev);
1627         u16 status = bnx2x_ack_int(bp);
1628         u16 mask;
1629
1630         /* Return here if interrupt is shared and it's not for us */
1631         if (unlikely(status == 0)) {
1632                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1633                 return IRQ_NONE;
1634         }
1635         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1636
1637         /* Return here if interrupt is disabled */
1638         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1639                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1640                 return IRQ_HANDLED;
1641         }
1642
1643 #ifdef BNX2X_STOP_ON_ERROR
1644         if (unlikely(bp->panic))
1645                 return IRQ_HANDLED;
1646 #endif
1647
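        /* With a single shared INT#A line the status word multiplexes
         * every source: bit 0 stands for the default (slowpath) status
         * block, and each fastpath status block contributes bit
         * (sb_id + 1) - hence the 0x2 << sb_id mask below and the
         * status & 0x1 test further down.
         */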
1648         mask = 0x2 << bp->fp[0].sb_id;
1649         if (status & mask) {
1650                 struct bnx2x_fastpath *fp = &bp->fp[0];
1651
1652                 prefetch(fp->rx_cons_sb);
1653                 prefetch(fp->tx_cons_sb);
1654                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1655                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1656
1657                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1658
1659                 status &= ~mask;
1660         }
1661
1662
1663         if (unlikely(status & 0x1)) {
1664                 schedule_work(&bp->sp_task);
1665
1666                 status &= ~0x1;
1667                 if (!status)
1668                         return IRQ_HANDLED;
1669         }
1670
1671         if (status)
1672                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1673                    status);
1674
1675         return IRQ_HANDLED;
1676 }
1677
1678 /* end of fast path */
1679
1680 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1681
1682 /* Link */
1683
1684 /*
1685  * General service functions
1686  */
1687
1688 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1689 {
1690         u32 lock_status;
1691         u32 resource_bit = (1 << resource);
1692         int func = BP_FUNC(bp);
1693         u32 hw_lock_control_reg;
1694         int cnt;
1695
1696         /* Validating that the resource is within range */
1697         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1698                 DP(NETIF_MSG_HW,
1699                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1700                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1701                 return -EINVAL;
1702         }
1703
1704         if (func <= 5) {
1705                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1706         } else {
1707                 hw_lock_control_reg =
1708                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1709         }
1710
1711         /* Validating that the resource is not already taken */
1712         lock_status = REG_RD(bp, hw_lock_control_reg);
1713         if (lock_status & resource_bit) {
1714                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1715                    lock_status, resource_bit);
1716                 return -EEXIST;
1717         }
1718
1719         /* Try for 5 seconds, every 5ms */
1720         for (cnt = 0; cnt < 1000; cnt++) {
1721                 /* Try to acquire the lock */
1722                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1723                 lock_status = REG_RD(bp, hw_lock_control_reg);
1724                 if (lock_status & resource_bit)
1725                         return 0;
1726
1727                 msleep(5);
1728         }
1729         DP(NETIF_MSG_HW, "Timeout\n");
1730         return -EAGAIN;
1731 }
1732
1733 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1734 {
1735         u32 lock_status;
1736         u32 resource_bit = (1 << resource);
1737         int func = BP_FUNC(bp);
1738         u32 hw_lock_control_reg;
1739
1740         /* Validating that the resource is within range */
1741         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1742                 DP(NETIF_MSG_HW,
1743                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1744                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1745                 return -EINVAL;
1746         }
1747
1748         if (func <= 5) {
1749                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1750         } else {
1751                 hw_lock_control_reg =
1752                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1753         }
1754
1755         /* Validating that the resource is currently taken */
1756         lock_status = REG_RD(bp, hw_lock_control_reg);
1757         if (!(lock_status & resource_bit)) {
1758                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1759                    lock_status, resource_bit);
1760                 return -EFAULT;
1761         }
1762
1763         REG_WR(bp, hw_lock_control_reg, resource_bit);
1764         return 0;
1765 }
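/* A typical caller brackets its register accesses with the pair above;
 * sketch (illustrative only - the GPIO code below does the same thing
 * without checking the return value):
 *
 *      int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      if (rc)
 *              return rc;      // -EINVAL, -EEXIST or -EAGAIN
 *      ... access the shared resource ...
 *      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */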
1766
1767 /* HW Lock for shared dual port PHYs */
1768 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1769 {
1770         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1771
1772         mutex_lock(&bp->port.phy_mutex);
1773
1774         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1775             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1776                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1777 }
1778
1779 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1780 {
1781         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1782
1783         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1784             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1785                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1786
1787         mutex_unlock(&bp->port.phy_mutex);
1788 }
1789
1790 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1791 {
1792         /* The GPIO should be swapped if swap register is set and active */
1793         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1794                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1795         int gpio_shift = gpio_num +
1796                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1797         u32 gpio_mask = (1 << gpio_shift);
1798         u32 gpio_reg;
1799
1800         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1801                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1802                 return -EINVAL;
1803         }
1804
1805         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1806         /* read GPIO and mask out everything except the float bits */
1807         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1808
1809         switch (mode) {
1810         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1811                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1812                    gpio_num, gpio_shift);
1813                 /* clear FLOAT and set CLR */
1814                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1815                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1816                 break;
1817
1818         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1819                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1820                    gpio_num, gpio_shift);
1821                 /* clear FLOAT and set SET */
1822                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1823                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1824                 break;
1825
1826         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1827                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1828                    gpio_num, gpio_shift);
1829                 /* set FLOAT */
1830                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1831                 break;
1832
1833         default:
1834                 break;
1835         }
1836
1837         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1838         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1839
1840         return 0;
1841 }
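/* Example: the fan-failure handling in bnx2x_attn_int_deasserted0()
 * below uses this helper to force the PHY into reset:
 *
 *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *                     MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */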
1842
1843 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1844 {
1845         u32 spio_mask = (1 << spio_num);
1846         u32 spio_reg;
1847
1848         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1849             (spio_num > MISC_REGISTERS_SPIO_7)) {
1850                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1851                 return -EINVAL;
1852         }
1853
1854         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1855         /* read SPIO and mask out everything except the float bits */
1856         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1857
1858         switch (mode) {
1859         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1860                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1861                 /* clear FLOAT and set CLR */
1862                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1863                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1864                 break;
1865
1866         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1867                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1868                 /* clear FLOAT and set SET */
1869                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1870                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1871                 break;
1872
1873         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1874                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1875                 /* set FLOAT */
1876                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1877                 break;
1878
1879         default:
1880                 break;
1881         }
1882
1883         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1884         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1885
1886         return 0;
1887 }
1888
1889 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1890 {
1891         switch (bp->link_vars.ieee_fc) {
1892         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1893                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1894                                           ADVERTISED_Pause);
1895                 break;
1896         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1897                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1898                                          ADVERTISED_Pause);
1899                 break;
1900         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1901                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1902                 break;
1903         default:
1904                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1905                                           ADVERTISED_Pause);
1906                 break;
1907         }
1908 }
1909
1910 static void bnx2x_link_report(struct bnx2x *bp)
1911 {
1912         if (bp->link_vars.link_up) {
1913                 if (bp->state == BNX2X_STATE_OPEN)
1914                         netif_carrier_on(bp->dev);
1915                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1916
1917                 printk("%d Mbps ", bp->link_vars.line_speed);
1918
1919                 if (bp->link_vars.duplex == DUPLEX_FULL)
1920                         printk("full duplex");
1921                 else
1922                         printk("half duplex");
1923
1924                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1925                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1926                                 printk(", receive ");
1927                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1928                                         printk("& transmit ");
1929                         } else {
1930                                 printk(", transmit ");
1931                         }
1932                         printk("flow control ON");
1933                 }
1934                 printk("\n");
1935
1936         } else { /* link_down */
1937                 netif_carrier_off(bp->dev);
1938                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1939         }
1940 }
1941
1942 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1943 {
1944         if (!BP_NOMCP(bp)) {
1945                 u8 rc;
1946
1947                 /* Initialize link parameters structure variables */
1948                 /* It is recommended to turn off RX FC for jumbo frames
1949                    for better performance */
1950                 if (IS_E1HMF(bp))
1951                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1952                 else if (bp->dev->mtu > 5000)
1953                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1954                 else
1955                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1956
1957                 bnx2x_acquire_phy_lock(bp);
1958                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1959                 bnx2x_release_phy_lock(bp);
1960
1961                 if (bp->link_vars.link_up)
1962                         bnx2x_link_report(bp);
1963
1964                 bnx2x_calc_fc_adv(bp);
1965
1966                 return rc;
1967         }
1968         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1969         return -EINVAL;
1970 }
1971
1972 static void bnx2x_link_set(struct bnx2x *bp)
1973 {
1974         if (!BP_NOMCP(bp)) {
1975                 bnx2x_acquire_phy_lock(bp);
1976                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1977                 bnx2x_release_phy_lock(bp);
1978
1979                 bnx2x_calc_fc_adv(bp);
1980         } else
1981                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1982 }
1983
1984 static void bnx2x__link_reset(struct bnx2x *bp)
1985 {
1986         if (!BP_NOMCP(bp)) {
1987                 bnx2x_acquire_phy_lock(bp);
1988                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1989                 bnx2x_release_phy_lock(bp);
1990         } else
1991                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1992 }
1993
1994 static u8 bnx2x_link_test(struct bnx2x *bp)
1995 {
1996         u8 rc;
1997
1998         bnx2x_acquire_phy_lock(bp);
1999         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2000         bnx2x_release_phy_lock(bp);
2001
2002         return rc;
2003 }
2004
2005 /* Calculates the sum of vn_min_rates.
2006    It's needed for further normalizing of the min_rates.
2007
2008    Returns:
2009      sum of vn_min_rates
2010        or
2011      0 - if all the min_rates are 0.
2012      In the latter case the fairness algorithm should be deactivated.
2013      If not all min_rates are zero then those that are zero will
2014      be set to 1.
2015  */
2016 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2017 {
2018         int i, port = BP_PORT(bp);
2019         u32 wsum = 0;
2020         int all_zero = 1;
2021
2022         for (i = 0; i < E1HVN_MAX; i++) {
2023                 u32 vn_cfg =
2024                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2025                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2026                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2027                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2028                         /* If min rate is zero - set it to 1 */
2029                         if (!vn_min_rate)
2030                                 vn_min_rate = DEF_MIN_RATE;
2031                         else
2032                                 all_zero = 0;
2033
2034                         wsum += vn_min_rate;
2035                 }
2036         }
2037
2038         /* ... only if all min rates are zero - disable FAIRNESS */
2039         if (all_zero)
2040                 return 0;
2041
2042         return wsum;
2043 }
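/* Worked example (illustrative): with four vn-s whose MIN_BW fields
 * read 0, 25, 50 and 0 (none hidden), the non-zero entries become
 * 2500 and 5000 after the * 100 scaling, the two zero entries are
 * bumped to DEF_MIN_RATE, and wsum is the sum of all four.  Only when
 * every MIN_BW field is zero does the function return 0, which
 * disables fairness altogether.
 */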
2044
2045 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2046                                    int en_fness,
2047                                    u16 port_rate,
2048                                    struct cmng_struct_per_port *m_cmng_port)
2049 {
2050         u32 r_param = port_rate / 8;
2051         int port = BP_PORT(bp);
2052         int i;
2053
2054         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2055
2056         /* Enable minmax only if we are in e1hmf mode */
2057         if (IS_E1HMF(bp)) {
2058                 u32 fair_periodic_timeout_usec;
2059                 u32 t_fair;
2060
2061                 /* Enable rate shaping and fairness */
2062                 m_cmng_port->flags.cmng_vn_enable = 1;
2063                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2064                 m_cmng_port->flags.rate_shaping_enable = 1;
2065
2066                 if (!en_fness)
2067                         DP(NETIF_MSG_IFUP, "All MIN values are zero"
2068                            "  fairness will be disabled\n");
2069
2070                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2071                 m_cmng_port->rs_vars.rs_periodic_timeout =
2072                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2073
2074                 /* this is the threshold below which no timer arming occurs;
2075                    the 1.25 coefficient makes it a little bigger than the
2076                    real time, to compensate for timer inaccuracy */
2077                 m_cmng_port->rs_vars.rs_threshold =
2078                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2079
2080                 /* resolution of fairness timer */
2081                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2082                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2083                 t_fair = T_FAIR_COEF / port_rate;
2084
2085                 /* this is the threshold below which we won't arm
2086                    the timer anymore */
2087                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2088
2089                 /* we multiply by 1e3/8 to get bytes/msec.
2090                    We don't want the credits to exceed
2091                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2092                 m_cmng_port->fair_vars.upper_bound =
2093                                                 r_param * t_fair * FAIR_MEM;
2094                 /* since each tick is 4 usec */
2095                 m_cmng_port->fair_vars.fairness_timeout =
2096                                                 fair_periodic_timeout_usec / 4;
2097
2098         } else {
2099                 /* Disable rate shaping and fairness */
2100                 m_cmng_port->flags.cmng_vn_enable = 0;
2101                 m_cmng_port->flags.fairness_enable = 0;
2102                 m_cmng_port->flags.rate_shaping_enable = 0;
2103
2104                 DP(NETIF_MSG_IFUP,
2105                    "Single function mode  minmax will be disabled\n");
2106         }
2107
2108         /* Store it to internal memory */
2109         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2110                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2111                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2112                        ((u32 *)(m_cmng_port))[i]);
2113 }
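/* Worked example (illustrative): on a 10Gbps link port_rate = 10000,
 * so r_param = 10000 / 8 = 1250 bytes per usec.  The fairness timer
 * resolution is then QM_ARB_BYTES / 1250 usec, and
 * t_fair = T_FAIR_COEF / 10000 - the "1000usec for 10G" case noted
 * above; at 1Gbps the same formulas give values ten times larger.
 */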
2114
2115 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2116                                    u32 wsum, u16 port_rate,
2117                                  struct cmng_struct_per_port *m_cmng_port)
2118 {
2119         struct rate_shaping_vars_per_vn m_rs_vn;
2120         struct fairness_vars_per_vn m_fair_vn;
2121         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2122         u16 vn_min_rate, vn_max_rate;
2123         int i;
2124
2125         /* If function is hidden - set min and max to zeroes */
2126         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2127                 vn_min_rate = 0;
2128                 vn_max_rate = 0;
2129
2130         } else {
2131                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2132                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2133                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2134                    if current min rate is zero - set it to 1.
2135                    This is a requirement of the algorithm. */
2136                 if ((vn_min_rate == 0) && wsum)
2137                         vn_min_rate = DEF_MIN_RATE;
2138                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2139                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2140         }
2141
2142         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2143            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2144
2145         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2146         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2147
2148         /* global vn counter - maximal Mbps for this vn */
2149         m_rs_vn.vn_counter.rate = vn_max_rate;
2150
2151         /* quota - number of bytes transmitted in this period */
2152         m_rs_vn.vn_counter.quota =
2153                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2154
2155 #ifdef BNX2X_PER_PROT_QOS
2156         /* per protocol counter */
2157         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2158                 /* maximal Mbps for this protocol */
2159                 m_rs_vn.protocol_counters[protocol].rate =
2160                                                 protocol_max_rate[protocol];
2161                 /* the quota in each timer period -
2162                    number of bytes transmitted in this period */
2163                 m_rs_vn.protocol_counters[protocol].quota =
2164                         (u32)(rs_periodic_timeout_usec *
2165                           ((double)m_rs_vn.
2166                                    protocol_counters[protocol].rate/8));
2167         }
2168 #endif
2169
2170         if (wsum) {
2171                 /* credit for each period of the fairness algorithm:
2172                    number of bytes in T_FAIR (the vn-s share the port rate).
2173                    wsum should not be larger than 10000, thus
2174                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2175                 m_fair_vn.vn_credit_delta =
2176                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2177                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2178                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2179                    m_fair_vn.vn_credit_delta);
2180         }
2181
2182 #ifdef BNX2X_PER_PROT_QOS
2183         do {
2184                 u32 protocolWeightSum = 0;
2185
2186                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2187                         protocolWeightSum +=
2188                                         drvInit.protocol_min_rate[protocol];
2189                 /* per protocol counter -
2190                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2191                 if (protocolWeightSum > 0) {
2192                         for (protocol = 0;
2193                              protocol < NUM_OF_PROTOCOLS; protocol++)
2194                                 /* credit for each period of the
2195                                    fairness algorithm - number of bytes in
2196                                    T_FAIR (the protocols share the vn rate) */
2197                                 m_fair_vn.protocol_credit_delta[protocol] =
2198                                         (u32)((vn_min_rate / 8) * t_fair *
2199                                         protocol_min_rate / protocolWeightSum);
2200                 }
2201         } while (0);
2202 #endif
2203
2204         /* Store it to internal memory */
2205         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2206                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2207                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2208                        ((u32 *)(&m_rs_vn))[i]);
2209
2210         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2211                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2212                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2213                        ((u32 *)(&m_fair_vn))[i]);
2214 }
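/* Worked example (illustrative): with vn_min_rate = 2500 and
 * wsum = 10000 the first max() term is 2500 * (T_FAIR_COEF / 80000),
 * i.e. a 25% share of the per-period byte budget, while the second
 * term keeps the credit from ever dropping below twice the
 * fair_threshold (QM_ARB_BYTES) programmed in bnx2x_init_port_minmax().
 */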
2215
2216 /* This function is called upon link interrupt */
2217 static void bnx2x_link_attn(struct bnx2x *bp)
2218 {
2219         int vn;
2220
2221         /* Make sure that we are synced with the current statistics */
2222         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2223
2224         bnx2x_acquire_phy_lock(bp);
2225         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2226         bnx2x_release_phy_lock(bp);
2227
2228         if (bp->link_vars.link_up) {
2229
2230                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2231                         struct host_port_stats *pstats;
2232
2233                         pstats = bnx2x_sp(bp, port_stats);
2234                         /* reset old bmac stats */
2235                         memset(&(pstats->mac_stx[0]), 0,
2236                                sizeof(struct mac_stx));
2237                 }
2238                 if ((bp->state == BNX2X_STATE_OPEN) ||
2239                     (bp->state == BNX2X_STATE_DISABLED))
2240                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2241         }
2242
2243         /* indicate link status */
2244         bnx2x_link_report(bp);
2245
2246         if (IS_E1HMF(bp)) {
2247                 int func;
2248
2249                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2250                         if (vn == BP_E1HVN(bp))
2251                                 continue;
2252
2253                         func = ((vn << 1) | BP_PORT(bp));
2254
2255                         /* Set the attention towards other drivers
2256                            on the same port */
2257                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2258                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2259                 }
2260         }
2261
2262         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2263                 struct cmng_struct_per_port m_cmng_port;
2264                 u32 wsum;
2265                 int port = BP_PORT(bp);
2266
2267                 /* Init RATE SHAPING and FAIRNESS contexts */
2268                 wsum = bnx2x_calc_vn_wsum(bp);
2269                 bnx2x_init_port_minmax(bp, (int)wsum,
2270                                         bp->link_vars.line_speed,
2271                                         &m_cmng_port);
2272                 if (IS_E1HMF(bp))
2273                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2274                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2275                                         wsum, bp->link_vars.line_speed,
2276                                                      &m_cmng_port);
2277         }
2278 }
2279
2280 static void bnx2x__link_status_update(struct bnx2x *bp)
2281 {
2282         if (bp->state != BNX2X_STATE_OPEN)
2283                 return;
2284
2285         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2286
2287         if (bp->link_vars.link_up)
2288                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2289         else
2290                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2291
2292         /* indicate link status */
2293         bnx2x_link_report(bp);
2294 }
2295
2296 static void bnx2x_pmf_update(struct bnx2x *bp)
2297 {
2298         int port = BP_PORT(bp);
2299         u32 val;
2300
2301         bp->port.pmf = 1;
2302         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2303
2304         /* enable nig attention */
2305         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2306         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2307         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2308
2309         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2310 }
2311
2312 /* end of Link */
2313
2314 /* slow path */
2315
2316 /*
2317  * General service functions
2318  */
2319
2320 /* the slow path queue is odd since completions arrive on the fastpath ring */
2321 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2322                          u32 data_hi, u32 data_lo, int common)
2323 {
2324         int func = BP_FUNC(bp);
2325
2326         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2327            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2328            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2329            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2330            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2331
2332 #ifdef BNX2X_STOP_ON_ERROR
2333         if (unlikely(bp->panic))
2334                 return -EIO;
2335 #endif
2336
2337         spin_lock_bh(&bp->spq_lock);
2338
2339         if (!bp->spq_left) {
2340                 BNX2X_ERR("BUG! SPQ ring full!\n");
2341                 spin_unlock_bh(&bp->spq_lock);
2342                 bnx2x_panic();
2343                 return -EBUSY;
2344         }
2345
2346         /* CID needs the port number to be encoded in it */
2347         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2348                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2349                                      HW_CID(bp, cid)));
2350         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2351         if (common)
2352                 bp->spq_prod_bd->hdr.type |=
2353                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2354
2355         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2356         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2357
2358         bp->spq_left--;
2359
2360         if (bp->spq_prod_bd == bp->spq_last_bd) {
2361                 bp->spq_prod_bd = bp->spq;
2362                 bp->spq_prod_idx = 0;
2363                 DP(NETIF_MSG_TIMER, "end of spq\n");
2364
2365         } else {
2366                 bp->spq_prod_bd++;
2367                 bp->spq_prod_idx++;
2368         }
2369
2370         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2371                bp->spq_prod_idx);
2372
2373         spin_unlock_bh(&bp->spq_lock);
2374         return 0;
2375 }
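/* A caller posts a ramrod by handing in the command, the connection id
 * and the 64-bit address of its data.  Schematic example (command name
 * as used elsewhere in this driver, config_addr is illustrative):
 *
 *      bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 *                    U64_HI(config_addr), U64_LO(config_addr), 0);
 *
 * The completion later arrives on the fastpath ring and is dispatched
 * through bnx2x_sp_event().
 */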
2376
2377 /* acquire split MCP access lock register */
2378 static int bnx2x_acquire_alr(struct bnx2x *bp)
2379 {
2380         u32 i, j, val;
2381         int rc = 0;
2382
2383         might_sleep();
2384         i = 100;
2385         for (j = 0; j < i*10; j++) {
2386                 val = (1UL << 31);
2387                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2388                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2389                 if (val & (1L << 31))
2390                         break;
2391
2392                 msleep(5);
2393         }
2394         if (!(val & (1L << 31))) {
2395                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2396                 rc = -EBUSY;
2397         }
2398
2399         return rc;
2400 }
2401
2402 /* release split MCP access lock register */
2403 static void bnx2x_release_alr(struct bnx2x *bp)
2404 {
2405         u32 val = 0;
2406
2407         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2408 }
2409
2410 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2411 {
2412         struct host_def_status_block *def_sb = bp->def_status_blk;
2413         u16 rc = 0;
2414
2415         barrier(); /* status block is written to by the chip */
2416         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2417                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2418                 rc |= 1;
2419         }
2420         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2421                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2422                 rc |= 2;
2423         }
2424         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2425                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2426                 rc |= 4;
2427         }
2428         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2429                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2430                 rc |= 8;
2431         }
2432         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2433                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2434                 rc |= 16;
2435         }
2436         return rc;
2437 }
2438
2439 /*
2440  * slow path service functions
2441  */
2442
2443 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2444 {
2445         int port = BP_PORT(bp);
2446         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2447                        COMMAND_REG_ATTN_BITS_SET);
2448         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2449                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2450         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2451                                        NIG_REG_MASK_INTERRUPT_PORT0;
2452         u32 aeu_mask;
2453
2454         if (bp->attn_state & asserted)
2455                 BNX2X_ERR("IGU ERROR\n");
2456
2457         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2458         aeu_mask = REG_RD(bp, aeu_addr);
2459
2460         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2461            aeu_mask, asserted);
2462         aeu_mask &= ~(asserted & 0xff);
2463         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2464
2465         REG_WR(bp, aeu_addr, aeu_mask);
2466         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2467
2468         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2469         bp->attn_state |= asserted;
2470         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2471
2472         if (asserted & ATTN_HARD_WIRED_MASK) {
2473                 if (asserted & ATTN_NIG_FOR_FUNC) {
2474
2475                         /* save nig interrupt mask */
2476                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2477                         REG_WR(bp, nig_int_mask_addr, 0);
2478
2479                         bnx2x_link_attn(bp);
2480
2481                         /* handle unicore attn? */
2482                 }
2483                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2484                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2485
2486                 if (asserted & GPIO_2_FUNC)
2487                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2488
2489                 if (asserted & GPIO_3_FUNC)
2490                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2491
2492                 if (asserted & GPIO_4_FUNC)
2493                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2494
2495                 if (port == 0) {
2496                         if (asserted & ATTN_GENERAL_ATTN_1) {
2497                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2498                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2499                         }
2500                         if (asserted & ATTN_GENERAL_ATTN_2) {
2501                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2502                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2503                         }
2504                         if (asserted & ATTN_GENERAL_ATTN_3) {
2505                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2506                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2507                         }
2508                 } else {
2509                         if (asserted & ATTN_GENERAL_ATTN_4) {
2510                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2511                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2512                         }
2513                         if (asserted & ATTN_GENERAL_ATTN_5) {
2514                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2515                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2516                         }
2517                         if (asserted & ATTN_GENERAL_ATTN_6) {
2518                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2519                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2520                         }
2521                 }
2522
2523         } /* if hardwired */
2524
2525         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2526            asserted, hc_addr);
2527         REG_WR(bp, hc_addr, asserted);
2528
2529         /* now set back the mask */
2530         if (asserted & ATTN_NIG_FOR_FUNC)
2531                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2532 }
2533
2534 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2535 {
2536         int port = BP_PORT(bp);
2537         int reg_offset;
2538         u32 val;
2539
2540         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2541                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2542
2543         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2544
2545                 val = REG_RD(bp, reg_offset);
2546                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2547                 REG_WR(bp, reg_offset, val);
2548
2549                 BNX2X_ERR("SPIO5 hw attention\n");
2550
2551                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2552                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2553                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2554                         /* Fan failure attention */
2555
2556                         /* The PHY reset is controlled by GPIO 1 */
2557                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2558                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2559                         /* Low power mode is controlled by GPIO 2 */
2560                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2561                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2562                         /* mark the failure */
2563                         bp->link_params.ext_phy_config &=
2564                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2565                         bp->link_params.ext_phy_config |=
2566                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2567                         SHMEM_WR(bp,
2568                                  dev_info.port_hw_config[port].
2569                                                         external_phy_config,
2570                                  bp->link_params.ext_phy_config);
2571                         /* log the failure */
2572                         printk(KERN_ERR PFX "Fan Failure on Network"
2573                                " Controller %s has caused the driver to"
2574                                " shut down the card to prevent permanent"
2575                                " damage.  Please contact Dell Support for"
2576                                " assistance\n", bp->dev->name);
2577                         break;
2578
2579                 default:
2580                         break;
2581                 }
2582         }
2583
2584         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2585
2586                 val = REG_RD(bp, reg_offset);
2587                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2588                 REG_WR(bp, reg_offset, val);
2589
2590                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2591                           (attn & HW_INTERRUT_ASSERT_SET_0));
2592                 bnx2x_panic();
2593         }
2594 }
2595
2596 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2597 {
2598         u32 val;
2599
2600         if (attn & BNX2X_DOORQ_ASSERT) {
2601
2602                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2603                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2604                 /* DORQ discard attention */
2605                 if (val & 0x2)
2606                         BNX2X_ERR("FATAL error from DORQ\n");
2607         }
2608
2609         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2610
2611                 int port = BP_PORT(bp);
2612                 int reg_offset;
2613
2614                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2615                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2616
2617                 val = REG_RD(bp, reg_offset);
2618                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2619                 REG_WR(bp, reg_offset, val);
2620
2621                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2622                           (attn & HW_INTERRUT_ASSERT_SET_1));
2623                 bnx2x_panic();
2624         }
2625 }
2626
2627 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2628 {
2629         u32 val;
2630
2631         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2632
2633                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2634                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2635                 /* CFC error attention */
2636                 if (val & 0x2)
2637                         BNX2X_ERR("FATAL error from CFC\n");
2638         }
2639
2640         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2641
2642                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2643                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2644                 /* RQ_USDMDP_FIFO_OVERFLOW */
2645                 if (val & 0x18000)
2646                         BNX2X_ERR("FATAL error from PXP\n");
2647         }
2648
2649         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2650
2651                 int port = BP_PORT(bp);
2652                 int reg_offset;
2653
2654                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2655                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2656
2657                 val = REG_RD(bp, reg_offset);
2658                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2659                 REG_WR(bp, reg_offset, val);
2660
2661                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2662                           (attn & HW_INTERRUT_ASSERT_SET_2));
2663                 bnx2x_panic();
2664         }
2665 }
2666
2667 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2668 {
2669         u32 val;
2670
2671         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2672
2673                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2674                         int func = BP_FUNC(bp);
2675
2676                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2677                         bnx2x__link_status_update(bp);
2678                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2679                                                         DRV_STATUS_PMF)
2680                                 bnx2x_pmf_update(bp);
2681
2682                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2683
2684                         BNX2X_ERR("MC assert!\n");
2685                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2686                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2688                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2689                         bnx2x_panic();
2690
2691                 } else if (attn & BNX2X_MCP_ASSERT) {
2692
2693                         BNX2X_ERR("MCP assert!\n");
2694                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2695                         bnx2x_fw_dump(bp);
2696
2697                 } else
2698                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2699         }
2700
2701         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2702                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2703                 if (attn & BNX2X_GRC_TIMEOUT) {
2704                         val = CHIP_IS_E1H(bp) ?
2705                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2706                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2707                 }
2708                 if (attn & BNX2X_GRC_RSV) {
2709                         val = CHIP_IS_E1H(bp) ?
2710                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2711                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2712                 }
2713                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2714         }
2715 }
2716
2717 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2718 {
2719         struct attn_route attn;
2720         struct attn_route group_mask;
2721         int port = BP_PORT(bp);
2722         int index;
2723         u32 reg_addr;
2724         u32 val;
2725         u32 aeu_mask;
2726
2727         /* need to take the HW lock because the MCP or the other port
2728            might also try to handle this event */
2729         bnx2x_acquire_alr(bp);
2730
2731         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2732         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2733         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2734         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2735         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2736            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2737
2738         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2739                 if (deasserted & (1 << index)) {
2740                         group_mask = bp->attn_group[index];
2741
2742                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2743                            index, group_mask.sig[0], group_mask.sig[1],
2744                            group_mask.sig[2], group_mask.sig[3]);
2745
2746                         bnx2x_attn_int_deasserted3(bp,
2747                                         attn.sig[3] & group_mask.sig[3]);
2748                         bnx2x_attn_int_deasserted1(bp,
2749                                         attn.sig[1] & group_mask.sig[1]);
2750                         bnx2x_attn_int_deasserted2(bp,
2751                                         attn.sig[2] & group_mask.sig[2]);
2752                         bnx2x_attn_int_deasserted0(bp,
2753                                         attn.sig[0] & group_mask.sig[0]);
2754
2755                         if ((attn.sig[0] & group_mask.sig[0] &
2756                                                 HW_PRTY_ASSERT_SET_0) ||
2757                             (attn.sig[1] & group_mask.sig[1] &
2758                                                 HW_PRTY_ASSERT_SET_1) ||
2759                             (attn.sig[2] & group_mask.sig[2] &
2760                                                 HW_PRTY_ASSERT_SET_2))
2761                                 BNX2X_ERR("FATAL HW block parity attention\n");
2762                 }
2763         }
2764
2765         bnx2x_release_alr(bp);
2766
2767         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2768
2769         val = ~deasserted;
2770         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2771            val, reg_addr);
2772         REG_WR(bp, reg_addr, val);
2773
2774         if (~bp->attn_state & deasserted)
2775                 BNX2X_ERR("IGU ERROR\n");
2776
2777         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2778                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2779
2780         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2781         aeu_mask = REG_RD(bp, reg_addr);
2782
2783         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2784            aeu_mask, deasserted);
2785         aeu_mask |= (deasserted & 0xff);
2786         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2787
2788         REG_WR(bp, reg_addr, aeu_mask);
2789         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2790
2791         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2792         bp->attn_state &= ~deasserted;
2793         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2794 }
2795
2796 static void bnx2x_attn_int(struct bnx2x *bp)
2797 {
2798         /* read local copy of bits */
2799         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2800         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2801         u32 attn_state = bp->attn_state;
2802
2803         /* look for changed bits */
2804         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2805         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2806
2807         DP(NETIF_MSG_HW,
2808            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2809            attn_bits, attn_ack, asserted, deasserted);
2810
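             /* a bit on which attn_bits and attn_ack already agree must
              * match attn_state as well; otherwise a transition was lost
              */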
2811         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2812                 BNX2X_ERR("BAD attention state\n");
2813
2814         /* handle bits that were raised */
2815         if (asserted)
2816                 bnx2x_attn_int_asserted(bp, asserted);
2817
2818         if (deasserted)
2819                 bnx2x_attn_int_deasserted(bp, deasserted);
2820 }
2821
2822 static void bnx2x_sp_task(struct work_struct *work)
2823 {
2824         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2825         u16 status;
2826
2828         /* Return here if interrupt is disabled */
2829         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2830                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2831                 return;
2832         }
2833
2834         status = bnx2x_update_dsb_idx(bp);
2835 /*      if (status == 0)                                     */
2836 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2837
2838         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2839
2840         /* HW attentions */
2841         if (status & 0x1)
2842                 bnx2x_attn_int(bp);
2843
2844         /* CStorm events: query_stats, port delete ramrod */
2845         if (status & 0x2)
2846                 bp->stats_pending = 0;
2847
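             /* acknowledge every default status block index; only the
              * final ack (TSTORM) re-enables the IGU interrupt
              */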
2848         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2849                      IGU_INT_NOP, 1);
2850         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2851                      IGU_INT_NOP, 1);
2852         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2853                      IGU_INT_NOP, 1);
2854         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2855                      IGU_INT_NOP, 1);
2856         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2857                      IGU_INT_ENABLE, 1);
2859 }
2860
2861 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2862 {
2863         struct net_device *dev = dev_instance;
2864         struct bnx2x *bp = netdev_priv(dev);
2865
2866         /* Return here if interrupt is disabled */
2867         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2869                 return IRQ_HANDLED;
2870         }
2871
2872         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2873
2874 #ifdef BNX2X_STOP_ON_ERROR
2875         if (unlikely(bp->panic))
2876                 return IRQ_HANDLED;
2877 #endif
2878
2879         schedule_work(&bp->sp_task);
2880
2881         return IRQ_HANDLED;
2882 }
2883
2884 /* end of slow path */
2885
2886 /* Statistics */
2887
2888 /****************************************************************************
2889 * Macros
2890 ****************************************************************************/
2891
2892 /* sum[hi:lo] += add[hi:lo] */
2893 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2894         do { \
2895                 s_lo += a_lo; \
2896                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2897         } while (0)
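     /* after "s_lo += a_lo" the low dword wrapped iff the new s_lo is
      * smaller than a_lo, so the comparison doubles as the carry into
      * the high dword (e.g. s_lo = 0xffffffff plus a_lo = 2 wraps to 1,
      * and 1 < 2 supplies the carry)
      */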
2898
2899 /* difference = minuend - subtrahend */
2900 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2901         do { \
2902                 if (m_lo < s_lo) { \
2903                         /* underflow */ \
2904                         d_hi = m_hi - s_hi; \
2905                         if (m_hi > s_hi) { \
2906                                 /* we can 'loan' 1 */ \
2907                                 d_hi--; \
2908                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2909                         } else { \
2910                                 /* m_hi <= s_hi */ \
2911                                 d_hi = 0; \
2912                                 d_lo = 0; \
2913                         } \
2914                 } else { \
2915                         /* m_lo >= s_lo */ \
2916                         if (m_hi < s_hi) { \
2917                                 d_hi = 0; \
2918                                 d_lo = 0; \
2919                         } else { \
2920                                 /* m_hi >= s_hi */ \
2921                                 d_hi = m_hi - s_hi; \
2922                                 d_lo = m_lo - s_lo; \
2923                         } \
2924                 } \
2925         } while (0)
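     /* when the subtrahend exceeds the minuend the difference is
      * clamped to 0 rather than wrapped, so a counter that moves
      * backwards is treated as no change
      */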
2926
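     /* mac_stx[0] holds the previous HW snapshot and mac_stx[1] the
      * running total: compute the delta from the last snapshot,
      * refresh the snapshot, then accumulate the delta
      */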
2927 #define UPDATE_STAT64(s, t) \
2928         do { \
2929                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2930                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2931                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2932                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2933                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2934                        pstats->mac_stx[1].t##_lo, diff.lo); \
2935         } while (0)
2936
2937 #define UPDATE_STAT64_NIG(s, t) \
2938         do { \
2939                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2940                         diff.lo, new->s##_lo, old->s##_lo); \
2941                 ADD_64(estats->t##_hi, diff.hi, \
2942                        estats->t##_lo, diff.lo); \
2943         } while (0)
2944
2945 /* sum[hi:lo] += add */
2946 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2947         do { \
2948                 s_lo += a; \
2949                 s_hi += (s_lo < a) ? 1 : 0; \
2950         } while (0)
2951
2952 #define UPDATE_EXTEND_STAT(s) \
2953         do { \
2954                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2955                               pstats->mac_stx[1].s##_lo, \
2956                               new->s); \
2957         } while (0)
2958
2959 #define UPDATE_EXTEND_TSTAT(s, t) \
2960         do { \
2961                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2962                 old_tclient->s = le32_to_cpu(tclient->s); \
2963                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2964         } while (0)
2965
2966 #define UPDATE_EXTEND_XSTAT(s, t) \
2967         do { \
2968                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2969                 old_xclient->s = le32_to_cpu(xclient->s); \
2970                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2971         } while (0)
2972
2973 /*
2974  * General service functions
2975  */
2976
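     /* returns the full 64-bit value on 64-bit kernels; on 32-bit
      * kernels long is 32 bits wide, so only the low dword is reported
      */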
2977 static inline long bnx2x_hilo(u32 *hiref)
2978 {
2979         u32 lo = *(hiref + 1);
2980 #if (BITS_PER_LONG == 64)
2981         u32 hi = *hiref;
2982
2983         return HILO_U64(hi, lo);
2984 #else
2985         return lo;
2986 #endif
2987 }
2988
2989 /*
2990  * Init service functions
2991  */
2992
2993 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2994 {
2995         if (!bp->stats_pending) {
2996                 struct eth_query_ramrod_data ramrod_data = {0};
2997                 int rc;
2998
2999                 ramrod_data.drv_counter = bp->stats_counter++;
3000                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3001                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3002
3003                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3004                                    ((u32 *)&ramrod_data)[1],
3005                                    ((u32 *)&ramrod_data)[0], 0);
3006                 if (rc == 0) {
3007                         /* stats ramrod has its own slot on the spq */
3008                         bp->spq_left++;
3009                         bp->stats_pending = 1;
3010                 }
3011         }
3012 }
3013
3014 static void bnx2x_stats_init(struct bnx2x *bp)
3015 {
3016         int port = BP_PORT(bp);
3017
3018         bp->executer_idx = 0;
3019         bp->stats_counter = 0;
3020
3021         /* port stats */
3022         if (!BP_NOMCP(bp))
3023                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3024         else
3025                 bp->port.port_stx = 0;
3026         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3027
3028         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3029         bp->port.old_nig_stats.brb_discard =
3030                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3031         bp->port.old_nig_stats.brb_truncate =
3032                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3033         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3034                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3035         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3036                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3037
3038         /* function stats */
3039         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3040         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3041         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3042         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3043
3044         bp->stats_state = STATS_STATE_DISABLED;
3045         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3046                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3047 }
3048
3049 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3050 {
3051         struct dmae_command *dmae = &bp->stats_dmae;
3052         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3053
3054         *stats_comp = DMAE_COMP_VAL;
3055
3056         /* loader */
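             /* the loader copies the first prepared command into the
              * DMAE command memory of channel loader_idx + 1 and, via
              * its completion value, writes that channel's GO register
              * to start it
              */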
3057         if (bp->executer_idx) {
3058                 int loader_idx = PMF_DMAE_C(bp);
3059
3060                 memset(dmae, 0, sizeof(struct dmae_command));
3061
3062                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3063                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3064                                 DMAE_CMD_DST_RESET |
3065 #ifdef __BIG_ENDIAN
3066                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3067 #else
3068                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3069 #endif
3070                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3071                                                DMAE_CMD_PORT_0) |
3072                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3073                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3074                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3075                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3076                                      sizeof(struct dmae_command) *
3077                                      (loader_idx + 1)) >> 2;
3078                 dmae->dst_addr_hi = 0;
3079                 dmae->len = sizeof(struct dmae_command) >> 2;
3080                 if (CHIP_IS_E1(bp))
3081                         dmae->len--;
3082                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3083                 dmae->comp_addr_hi = 0;
3084                 dmae->comp_val = 1;
3085
3086                 *stats_comp = 0;
3087                 bnx2x_post_dmae(bp, dmae, loader_idx);
3088
3089         } else if (bp->func_stx) {
3090                 *stats_comp = 0;
3091                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3092         }
3093 }
3094
3095 static int bnx2x_stats_comp(struct bnx2x *bp)
3096 {
3097         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3098         int cnt = 10;
3099
3100         might_sleep();
3101         while (*stats_comp != DMAE_COMP_VAL) {
3102                 if (!cnt) {
3103                         BNX2X_ERR("timeout waiting for stats to finish\n");
3104                         break;
3105                 }
3106                 cnt--;
3107                 msleep(1);
3108         }
3109         return 1;
3110 }
3111
3112 /*
3113  * Statistics service functions
3114  */
3115
3116 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3117 {
3118         struct dmae_command *dmae;
3119         u32 opcode;
3120         int loader_idx = PMF_DMAE_C(bp);
3121         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3122
3123         /* sanity */
3124         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3125                 BNX2X_ERR("BUG!\n");
3126                 return;
3127         }
3128
3129         bp->executer_idx = 0;
3130
3131         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3132                   DMAE_CMD_C_ENABLE |
3133                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3134 #ifdef __BIG_ENDIAN
3135                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3136 #else
3137                   DMAE_CMD_ENDIANITY_DW_SWAP |
3138 #endif
3139                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3140                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3141
3142         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3143         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3144         dmae->src_addr_lo = bp->port.port_stx >> 2;
3145         dmae->src_addr_hi = 0;
3146         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3147         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3148         dmae->len = DMAE_LEN32_RD_MAX;
3149         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3150         dmae->comp_addr_hi = 0;
3151         dmae->comp_val = 1;
3152
3153         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3154         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3155         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3156         dmae->src_addr_hi = 0;
3157         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3158                                    DMAE_LEN32_RD_MAX * 4);
3159         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3160                                    DMAE_LEN32_RD_MAX * 4);
3161         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3162         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3163         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3164         dmae->comp_val = DMAE_COMP_VAL;
3165
3166         *stats_comp = 0;
3167         bnx2x_hw_stats_post(bp);
3168         bnx2x_stats_comp(bp);
3169 }
3170
3171 static void bnx2x_port_stats_init(struct bnx2x *bp)
3172 {
3173         struct dmae_command *dmae;
3174         int port = BP_PORT(bp);
3175         int vn = BP_E1HVN(bp);
3176         u32 opcode;
3177         int loader_idx = PMF_DMAE_C(bp);
3178         u32 mac_addr;
3179         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3180
3181         /* sanity */
3182         if (!bp->link_vars.link_up || !bp->port.pmf) {
3183                 BNX2X_ERR("BUG!\n");
3184                 return;
3185         }
3186
3187         bp->executer_idx = 0;
3188
3189         /* MCP */
3190         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3191                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3192                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3193 #ifdef __BIG_ENDIAN
3194                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3195 #else
3196                   DMAE_CMD_ENDIANITY_DW_SWAP |
3197 #endif
3198                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3199                   (vn << DMAE_CMD_E1HVN_SHIFT));
3200
3201         if (bp->port.port_stx) {
3202
3203                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3204                 dmae->opcode = opcode;
3205                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3206                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3207                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3208                 dmae->dst_addr_hi = 0;
3209                 dmae->len = sizeof(struct host_port_stats) >> 2;
3210                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3211                 dmae->comp_addr_hi = 0;
3212                 dmae->comp_val = 1;
3213         }
3214
3215         if (bp->func_stx) {
3216
3217                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3218                 dmae->opcode = opcode;
3219                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3220                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3221                 dmae->dst_addr_lo = bp->func_stx >> 2;
3222                 dmae->dst_addr_hi = 0;
3223                 dmae->len = sizeof(struct host_func_stats) >> 2;
3224                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3225                 dmae->comp_addr_hi = 0;
3226                 dmae->comp_val = 1;
3227         }
3228
3229         /* MAC */
3230         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3231                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3232                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3233 #ifdef __BIG_ENDIAN
3234                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3235 #else
3236                   DMAE_CMD_ENDIANITY_DW_SWAP |
3237 #endif
3238                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3239                   (vn << DMAE_CMD_E1HVN_SHIFT));
3240
3241         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3242
3243                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3244                                    NIG_REG_INGRESS_BMAC0_MEM);
3245
3246                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3247                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3248                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3249                 dmae->opcode = opcode;
3250                 dmae->src_addr_lo = (mac_addr +
3251                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3252                 dmae->src_addr_hi = 0;
3253                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3254                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3255                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3256                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3257                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3258                 dmae->comp_addr_hi = 0;
3259                 dmae->comp_val = 1;
3260
3261                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3262                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3263                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264                 dmae->opcode = opcode;
3265                 dmae->src_addr_lo = (mac_addr +
3266                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267                 dmae->src_addr_hi = 0;
3268                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3269                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3270                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3271                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3272                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3273                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3274                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3275                 dmae->comp_addr_hi = 0;
3276                 dmae->comp_val = 1;
3277
3278         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3279
3280                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3281
3282                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3283                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3284                 dmae->opcode = opcode;
3285                 dmae->src_addr_lo = (mac_addr +
3286                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3287                 dmae->src_addr_hi = 0;
3288                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3289                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3290                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3291                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292                 dmae->comp_addr_hi = 0;
3293                 dmae->comp_val = 1;
3294
3295                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3296                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297                 dmae->opcode = opcode;
3298                 dmae->src_addr_lo = (mac_addr +
3299                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3300                 dmae->src_addr_hi = 0;
3301                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3302                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3303                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3304                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3305                 dmae->len = 1;
3306                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307                 dmae->comp_addr_hi = 0;
3308                 dmae->comp_val = 1;
3309
3310                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3311                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3312                 dmae->opcode = opcode;
3313                 dmae->src_addr_lo = (mac_addr +
3314                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3315                 dmae->src_addr_hi = 0;
3316                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3317                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3318                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3319                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3320                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3321                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322                 dmae->comp_addr_hi = 0;
3323                 dmae->comp_val = 1;
3324         }
3325
3326         /* NIG */
3327         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3328         dmae->opcode = opcode;
3329         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3330                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3331         dmae->src_addr_hi = 0;
3332         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3333         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3334         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3335         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3336         dmae->comp_addr_hi = 0;
3337         dmae->comp_val = 1;
3338
3339         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340         dmae->opcode = opcode;
3341         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3342                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3343         dmae->src_addr_hi = 0;
3344         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3345                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3346         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3347                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3348         dmae->len = (2*sizeof(u32)) >> 2;
3349         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3350         dmae->comp_addr_hi = 0;
3351         dmae->comp_val = 1;
3352
3353         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3354         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3355                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3356                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3357 #ifdef __BIG_ENDIAN
3358                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3359 #else
3360                         DMAE_CMD_ENDIANITY_DW_SWAP |
3361 #endif
3362                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3363                         (vn << DMAE_CMD_E1HVN_SHIFT));
3364         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3365                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3366         dmae->src_addr_hi = 0;
3367         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3368                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3369         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3370                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3371         dmae->len = (2*sizeof(u32)) >> 2;
3372         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3373         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3374         dmae->comp_val = DMAE_COMP_VAL;
3375
3376         *stats_comp = 0;
3377 }
3378
3379 static void bnx2x_func_stats_init(struct bnx2x *bp)
3380 {
3381         struct dmae_command *dmae = &bp->stats_dmae;
3382         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3383
3384         /* sanity */
3385         if (!bp->func_stx) {
3386                 BNX2X_ERR("BUG!\n");
3387                 return;
3388         }
3389
3390         bp->executer_idx = 0;
3391         memset(dmae, 0, sizeof(struct dmae_command));
3392
3393         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3394                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3395                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3396 #ifdef __BIG_ENDIAN
3397                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3398 #else
3399                         DMAE_CMD_ENDIANITY_DW_SWAP |
3400 #endif
3401                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3402                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3403         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3404         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3405         dmae->dst_addr_lo = bp->func_stx >> 2;
3406         dmae->dst_addr_hi = 0;
3407         dmae->len = sizeof(struct host_func_stats) >> 2;
3408         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3409         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3410         dmae->comp_val = DMAE_COMP_VAL;
3411
3412         *stats_comp = 0;
3413 }
3414
3415 static void bnx2x_stats_start(struct bnx2x *bp)
3416 {
3417         if (bp->port.pmf)
3418                 bnx2x_port_stats_init(bp);
3419
3420         else if (bp->func_stx)
3421                 bnx2x_func_stats_init(bp);
3422
3423         bnx2x_hw_stats_post(bp);
3424         bnx2x_storm_stats_post(bp);
3425 }
3426
3427 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3428 {
3429         bnx2x_stats_comp(bp);
3430         bnx2x_stats_pmf_update(bp);
3431         bnx2x_stats_start(bp);
3432 }
3433
3434 static void bnx2x_stats_restart(struct bnx2x *bp)
3435 {
3436         bnx2x_stats_comp(bp);
3437         bnx2x_stats_start(bp);
3438 }
3439
3440 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3441 {
3442         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3443         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3444         struct regpair diff;
3445
3446         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3447         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3448         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3449         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3450         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3451         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3452         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3453         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3454         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3455         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3456         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3457         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3458         UPDATE_STAT64(tx_stat_gt127,
3459                                 tx_stat_etherstatspkts65octetsto127octets);
3460         UPDATE_STAT64(tx_stat_gt255,
3461                                 tx_stat_etherstatspkts128octetsto255octets);
3462         UPDATE_STAT64(tx_stat_gt511,
3463                                 tx_stat_etherstatspkts256octetsto511octets);
3464         UPDATE_STAT64(tx_stat_gt1023,
3465                                 tx_stat_etherstatspkts512octetsto1023octets);
3466         UPDATE_STAT64(tx_stat_gt1518,
3467                                 tx_stat_etherstatspkts1024octetsto1522octets);
3468         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3469         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3470         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3471         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3472         UPDATE_STAT64(tx_stat_gterr,
3473                                 tx_stat_dot3statsinternalmactransmiterrors);
3474         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3475 }
3476
3477 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3478 {
3479         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3480         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3481
3482         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3483         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3484         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3485         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3486         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3487         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3488         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3489         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3490         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3491         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3492         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3493         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3494         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3495         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3496         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3497         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3498         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3499         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3500         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3501         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3502         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3503         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3504         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3505         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3506         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3507         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3508         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3509         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3510         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3511         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3512         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3513 }
3514
3515 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3516 {
3517         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3518         struct nig_stats *old = &(bp->port.old_nig_stats);
3519         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3520         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3521         struct regpair diff;
3522
3523         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3524                 bnx2x_bmac_stats_update(bp);
3525
3526         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3527                 bnx2x_emac_stats_update(bp);
3528
3529         else { /* unreached */
3530                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3531                 return -1;
3532         }
3533
3534         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3535                       new->brb_discard - old->brb_discard);
3536         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3537                       new->brb_truncate - old->brb_truncate);
3538
3539         UPDATE_STAT64_NIG(egress_mac_pkt0,
3540                                         etherstatspkts1024octetsto1522octets);
3541         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3542
3543         memcpy(old, new, sizeof(struct nig_stats));
3544
3545         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3546                sizeof(struct mac_stx));
3547         estats->brb_drop_hi = pstats->brb_drop_hi;
3548         estats->brb_drop_lo = pstats->brb_drop_lo;
3549
3550         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3551
3552         return 0;
3553 }
3554
3555 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3556 {
3557         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3558         int cl_id = BP_CL_ID(bp);
3559         struct tstorm_per_port_stats *tport =
3560                                 &stats->tstorm_common.port_statistics;
3561         struct tstorm_per_client_stats *tclient =
3562                         &stats->tstorm_common.client_statistics[cl_id];
3563         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3564         struct xstorm_per_client_stats *xclient =
3565                         &stats->xstorm_common.client_statistics[cl_id];
3566         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3567         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3568         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3569         u32 diff;
3570
3571         /* are storm stats valid? */
3572         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3573                                                         bp->stats_counter) {
3574                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3575                    "  tstorm counter (%d) != stats_counter (%d)\n",
3576                    tclient->stats_counter, bp->stats_counter);
3577                 return -1;
3578         }
3579         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3580                                                         bp->stats_counter) {
3581                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3582                    "  xstorm counter (%d) != stats_counter (%d)\n",
3583                    xclient->stats_counter, bp->stats_counter);
3584                 return -2;
3585         }
3586
3587         fstats->total_bytes_received_hi =
3588         fstats->valid_bytes_received_hi =
3589                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3590         fstats->total_bytes_received_lo =
3591         fstats->valid_bytes_received_lo =
3592                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3593
3594         estats->error_bytes_received_hi =
3595                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3596         estats->error_bytes_received_lo =
3597                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3598         ADD_64(estats->error_bytes_received_hi,
3599                estats->rx_stat_ifhcinbadoctets_hi,
3600                estats->error_bytes_received_lo,
3601                estats->rx_stat_ifhcinbadoctets_lo);
3602
3603         ADD_64(fstats->total_bytes_received_hi,
3604                estats->error_bytes_received_hi,
3605                fstats->total_bytes_received_lo,
3606                estats->error_bytes_received_lo);
3607
3608         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3609         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3610                                 total_multicast_packets_received);
3611         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3612                                 total_broadcast_packets_received);
3613
3614         fstats->total_bytes_transmitted_hi =
3615                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3616         fstats->total_bytes_transmitted_lo =
3617                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3618
3619         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3620                                 total_unicast_packets_transmitted);
3621         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3622                                 total_multicast_packets_transmitted);
3623         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3624                                 total_broadcast_packets_transmitted);
3625
3626         memcpy(estats, &(fstats->total_bytes_received_hi),
3627                sizeof(struct host_func_stats) - 2*sizeof(u32));
3628
3629         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3630         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3631         estats->brb_truncate_discard =
3632                                 le32_to_cpu(tport->brb_truncate_discard);
3633         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3634
3635         old_tclient->rcv_unicast_bytes.hi =
3636                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3637         old_tclient->rcv_unicast_bytes.lo =
3638                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3639         old_tclient->rcv_broadcast_bytes.hi =
3640                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3641         old_tclient->rcv_broadcast_bytes.lo =
3642                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3643         old_tclient->rcv_multicast_bytes.hi =
3644                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3645         old_tclient->rcv_multicast_bytes.lo =
3646                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3647         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3648
3649         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3650         old_tclient->packets_too_big_discard =
3651                                 le32_to_cpu(tclient->packets_too_big_discard);
3652         estats->no_buff_discard =
3653         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3654         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3655
3656         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3657         old_xclient->unicast_bytes_sent.hi =
3658                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3659         old_xclient->unicast_bytes_sent.lo =
3660                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3661         old_xclient->multicast_bytes_sent.hi =
3662                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3663         old_xclient->multicast_bytes_sent.lo =
3664                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3665         old_xclient->broadcast_bytes_sent.hi =
3666                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3667         old_xclient->broadcast_bytes_sent.lo =
3668                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3669
3670         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3671
3672         return 0;
3673 }
3674
3675 static void bnx2x_net_stats_update(struct bnx2x *bp)
3676 {
3677         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3678         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3679         struct net_device_stats *nstats = &bp->dev->stats;
3680
3681         nstats->rx_packets =
3682                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3683                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3684                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3685
3686         nstats->tx_packets =
3687                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3688                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3689                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3690
3691         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3692
3693         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3694
3695         nstats->rx_dropped = old_tclient->checksum_discard +
3696                              estats->mac_discard;
3697         nstats->tx_dropped = 0;
3698
3699         nstats->multicast =
3700                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3701
3702         nstats->collisions =
3703                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3704                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3705                         estats->tx_stat_dot3statslatecollisions_lo +
3706                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3707
3708         estats->jabber_packets_received =
3709                                 old_tclient->packets_too_big_discard +
3710                                 estats->rx_stat_dot3statsframestoolong_lo;
3711
3712         nstats->rx_length_errors =
3713                                 estats->rx_stat_etherstatsundersizepkts_lo +
3714                                 estats->jabber_packets_received;
3715         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3716         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3717         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3718         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3719         nstats->rx_missed_errors = estats->xxoverflow_discard;
3720
3721         nstats->rx_errors = nstats->rx_length_errors +
3722                             nstats->rx_over_errors +
3723                             nstats->rx_crc_errors +
3724                             nstats->rx_frame_errors +
3725                             nstats->rx_fifo_errors +
3726                             nstats->rx_missed_errors;
3727
3728         nstats->tx_aborted_errors =
3729                         estats->tx_stat_dot3statslatecollisions_lo +
3730                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3731         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3732         nstats->tx_fifo_errors = 0;
3733         nstats->tx_heartbeat_errors = 0;
3734         nstats->tx_window_errors = 0;
3735
3736         nstats->tx_errors = nstats->tx_aborted_errors +
3737                             nstats->tx_carrier_errors;
3738 }
3739
3740 static void bnx2x_stats_update(struct bnx2x *bp)
3741 {
3742         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3743         int update = 0;
3744
3745         if (*stats_comp != DMAE_COMP_VAL)
3746                 return;
3747
3748         if (bp->port.pmf)
3749                 update = (bnx2x_hw_stats_update(bp) == 0);
3750
3751         update |= (bnx2x_storm_stats_update(bp) == 0);
3752
3753         if (update)
3754                 bnx2x_net_stats_update(bp);
3755
3756         else {
3757                 if (bp->stats_pending) {
3758                         bp->stats_pending++;
3759                         if (bp->stats_pending == 3) {
3760                                 BNX2X_ERR("stats not updated 3 times in a row\n");
3761                                 bnx2x_panic();
3762                                 return;
3763                         }
3764                 }
3765         }
3766
3767         if (bp->msglevel & NETIF_MSG_TIMER) {
3768                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3769                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3770                 struct net_device_stats *nstats = &bp->dev->stats;
3771                 int i;
3772
3773                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3774                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3775                                   "  tx pkt (%lx)\n",
3776                        bnx2x_tx_avail(bp->fp),
3777                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3778                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3779                                   "  rx pkt (%lx)\n",
3780                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3781                              bp->fp->rx_comp_cons),
3782                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3783                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3784                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3785                        estats->driver_xoff, estats->brb_drop_lo);
3786                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3787                         "packets_too_big_discard %u  no_buff_discard %u  "
3788                         "mac_discard %u  mac_filter_discard %u  "
3789                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3790                         "ttl0_discard %u\n",
3791                        old_tclient->checksum_discard,
3792                        old_tclient->packets_too_big_discard,
3793                        old_tclient->no_buff_discard, estats->mac_discard,
3794                        estats->mac_filter_discard, estats->xxoverflow_discard,
3795                        estats->brb_truncate_discard,
3796                        old_tclient->ttl0_discard);
3797
3798                 for_each_queue(bp, i) {
3799                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3800                                bnx2x_fp(bp, i, tx_pkt),
3801                                bnx2x_fp(bp, i, rx_pkt),
3802                                bnx2x_fp(bp, i, rx_calls));
3803                 }
3804         }
3805
3806         bnx2x_hw_stats_post(bp);
3807         bnx2x_storm_stats_post(bp);
3808 }
3809
3810 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3811 {
3812         struct dmae_command *dmae;
3813         u32 opcode;
3814         int loader_idx = PMF_DMAE_C(bp);
3815         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3816
3817         bp->executer_idx = 0;
3818
3819         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3820                   DMAE_CMD_C_ENABLE |
3821                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3822 #ifdef __BIG_ENDIAN
3823                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3824 #else
3825                   DMAE_CMD_ENDIANITY_DW_SWAP |
3826 #endif
3827                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3828                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3829
3830         if (bp->port.port_stx) {
3831
3832                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3833                 if (bp->func_stx)
3834                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3835                 else
3836                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3837                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3838                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3839                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3840                 dmae->dst_addr_hi = 0;
3841                 dmae->len = sizeof(struct host_port_stats) >> 2;
3842                 if (bp->func_stx) {
3843                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3844                         dmae->comp_addr_hi = 0;
3845                         dmae->comp_val = 1;
3846                 } else {
3847                         dmae->comp_addr_lo =
3848                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3849                         dmae->comp_addr_hi =
3850                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3851                         dmae->comp_val = DMAE_COMP_VAL;
3852
3853                         *stats_comp = 0;
3854                 }
3855         }
3856
3857         if (bp->func_stx) {
3858
3859                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3861                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3862                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3863                 dmae->dst_addr_lo = bp->func_stx >> 2;
3864                 dmae->dst_addr_hi = 0;
3865                 dmae->len = sizeof(struct host_func_stats) >> 2;
3866                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3867                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3868                 dmae->comp_val = DMAE_COMP_VAL;
3869
3870                 *stats_comp = 0;
3871         }
3872 }
3873
3874 static void bnx2x_stats_stop(struct bnx2x *bp)
3875 {
3876         int update = 0;
3877
3878         bnx2x_stats_comp(bp);
3879
3880         if (bp->port.pmf)
3881                 update = (bnx2x_hw_stats_update(bp) == 0);
3882
3883         update |= (bnx2x_storm_stats_update(bp) == 0);
3884
3885         if (update) {
3886                 bnx2x_net_stats_update(bp);
3887
3888                 if (bp->port.pmf)
3889                         bnx2x_port_stats_stop(bp);
3890
3891                 bnx2x_hw_stats_post(bp);
3892                 bnx2x_stats_comp(bp);
3893         }
3894 }
3895
3896 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3897 {
3898 }
3899
3900 static const struct {
3901         void (*action)(struct bnx2x *bp);
3902         enum bnx2x_stats_state next_state;
3903 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3904 /* state        event   */
3905 {
3906 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3907 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3908 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3909 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3910 },
3911 {
3912 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3913 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3914 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3915 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3916 }
3917 };
3918
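     /* run the action registered for [current state, event], then
      * advance to the table's next_state
      */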
3919 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3920 {
3921         enum bnx2x_stats_state state = bp->stats_state;
3922
3923         bnx2x_stats_stm[state][event].action(bp);
3924         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3925
3926         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3927                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3928                    state, event, bp->stats_state);
3929 }
3930
3931 static void bnx2x_timer(unsigned long data)
3932 {
3933         struct bnx2x *bp = (struct bnx2x *) data;
3934
3935         if (!netif_running(bp->dev))
3936                 return;
3937
3938         if (atomic_read(&bp->intr_sem) != 0)
3939                 goto timer_restart;
3940
3941         if (poll) {
3942                 struct bnx2x_fastpath *fp = &bp->fp[0];
3944
3945                 bnx2x_tx_int(fp, 1000);
3946                 bnx2x_rx_int(fp, 1000);
3947         }
3948
3949         if (!BP_NOMCP(bp)) {
3950                 int func = BP_FUNC(bp);
3951                 u32 drv_pulse;
3952                 u32 mcp_pulse;
3953
3954                 ++bp->fw_drv_pulse_wr_seq;
3955                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3956                 /* TBD - add SYSTEM_TIME */
3957                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3958                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3959
3960                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3961                              MCP_PULSE_SEQ_MASK);
3962                 /* The delta between driver pulse and mcp response
3963                  * should be 1 (before mcp response) or 0 (after mcp response)
3964                  */
3965                 if ((drv_pulse != mcp_pulse) &&
3966                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3967                         /* someone lost a heartbeat... */
3968                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3969                                   drv_pulse, mcp_pulse);
3970                 }
3971         }
3972
3973         if ((bp->state == BNX2X_STATE_OPEN) ||
3974             (bp->state == BNX2X_STATE_DISABLED))
3975                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3976
3977 timer_restart:
3978         mod_timer(&bp->timer, jiffies + bp->current_interval);
3979 }
3980
3981 /* end of Statistics */
3982
3983 /* nic init */
3984
3985 /*
3986  * nic init service functions
3987  */
3988
3989 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3990 {
3991         int port = BP_PORT(bp);
3992
3993         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3994                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3995                         sizeof(struct ustorm_status_block)/4);
3996         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3997                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3998                         sizeof(struct cstorm_status_block)/4);
3999 }
4000
4001 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4002                           dma_addr_t mapping, int sb_id)
4003 {
4004         int port = BP_PORT(bp);
4005         int func = BP_FUNC(bp);
4006         int index;
4007         u64 section;
4008
4009         /* USTORM */
4010         section = ((u64)mapping) + offsetof(struct host_status_block,
4011                                             u_status_block);
4012         sb->u_status_block.status_block_id = sb_id;
4013
4014         REG_WR(bp, BAR_USTRORM_INTMEM +
4015                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4016         REG_WR(bp, BAR_USTRORM_INTMEM +
4017                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4018                U64_HI(section));
4019         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4020                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4021
4022         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4023                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4024                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4025
4026         /* CSTORM */
4027         section = ((u64)mapping) + offsetof(struct host_status_block,
4028                                             c_status_block);
4029         sb->c_status_block.status_block_id = sb_id;
4030
4031         REG_WR(bp, BAR_CSTRORM_INTMEM +
4032                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4033         REG_WR(bp, BAR_CSTRORM_INTMEM +
4034                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4035                U64_HI(section));
4036         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4037                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4038
4039         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4040                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4041                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4042
4043         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4044 }
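/* Editor's note: hypothetical helper (not in the driver) that makes the
 * recurring U64_LO/U64_HI pattern above explicit - a 64-bit DMA address is
 * programmed as two 32-bit writes, low half at 'off', high half at 'off + 4'.
 */
static inline void bnx2x_wr_dma_addr(struct bnx2x *bp, u32 off, u64 addr)
{
        REG_WR(bp, off, (u32)addr);                     /* U64_LO(addr) */
        REG_WR(bp, off + 4, (u32)(addr >> 32));         /* U64_HI(addr) */
}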
4045
4046 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4047 {
4048         int func = BP_FUNC(bp);
4049
4050         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4051                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4052                         sizeof(struct ustorm_def_status_block)/4);
4053         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4054                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4055                         sizeof(struct cstorm_def_status_block)/4);
4056         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4057                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4058                         sizeof(struct xstorm_def_status_block)/4);
4059         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4060                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4061                         sizeof(struct tstorm_def_status_block)/4);
4062 }
4063
4064 static void bnx2x_init_def_sb(struct bnx2x *bp,
4065                               struct host_def_status_block *def_sb,
4066                               dma_addr_t mapping, int sb_id)
4067 {
4068         int port = BP_PORT(bp);
4069         int func = BP_FUNC(bp);
4070         int index, val, reg_offset;
4071         u64 section;
4072
4073         /* ATTN */
4074         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4075                                             atten_status_block);
4076         def_sb->atten_status_block.status_block_id = sb_id;
4077
4078         bp->attn_state = 0;
4079
4080         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4081                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4082
4083         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4084                 bp->attn_group[index].sig[0] = REG_RD(bp,
4085                                                      reg_offset + 0x10*index);
4086                 bp->attn_group[index].sig[1] = REG_RD(bp,
4087                                                reg_offset + 0x4 + 0x10*index);
4088                 bp->attn_group[index].sig[2] = REG_RD(bp,
4089                                                reg_offset + 0x8 + 0x10*index);
4090                 bp->attn_group[index].sig[3] = REG_RD(bp,
4091                                                reg_offset + 0xc + 0x10*index);
4092         }
4093
4094         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4095                              HC_REG_ATTN_MSG0_ADDR_L);
4096
4097         REG_WR(bp, reg_offset, U64_LO(section));
4098         REG_WR(bp, reg_offset + 4, U64_HI(section));
4099
4100         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4101
4102         val = REG_RD(bp, reg_offset);
4103         val |= sb_id;
4104         REG_WR(bp, reg_offset, val);
4105
4106         /* USTORM */
4107         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4108                                             u_def_status_block);
4109         def_sb->u_def_status_block.status_block_id = sb_id;
4110
4111         REG_WR(bp, BAR_USTRORM_INTMEM +
4112                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4113         REG_WR(bp, BAR_USTRORM_INTMEM +
4114                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4115                U64_HI(section));
4116         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4117                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4118
4119         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4120                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4121                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4122
4123         /* CSTORM */
4124         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4125                                             c_def_status_block);
4126         def_sb->c_def_status_block.status_block_id = sb_id;
4127
4128         REG_WR(bp, BAR_CSTRORM_INTMEM +
4129                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4130         REG_WR(bp, BAR_CSTRORM_INTMEM +
4131                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4132                U64_HI(section));
4133         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4134                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4135
4136         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4137                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4138                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4139
4140         /* TSTORM */
4141         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4142                                             t_def_status_block);
4143         def_sb->t_def_status_block.status_block_id = sb_id;
4144
4145         REG_WR(bp, BAR_TSTRORM_INTMEM +
4146                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4147         REG_WR(bp, BAR_TSTRORM_INTMEM +
4148                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4149                U64_HI(section));
4150         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4151                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4152
4153         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4154                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4155                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4156
4157         /* XSTORM */
4158         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4159                                             x_def_status_block);
4160         def_sb->x_def_status_block.status_block_id = sb_id;
4161
4162         REG_WR(bp, BAR_XSTRORM_INTMEM +
4163                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4164         REG_WR(bp, BAR_XSTRORM_INTMEM +
4165                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4166                U64_HI(section));
4167         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4168                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4169
4170         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4171                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4172                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4173
4174         bp->stats_pending = 0;
4175         bp->set_mac_pending = 0;
4176
4177         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4178 }
4179
4180 static void bnx2x_update_coalesce(struct bnx2x *bp)
4181 {
4182         int port = BP_PORT(bp);
4183         int i;
4184
4185         for_each_queue(bp, i) {
4186                 int sb_id = bp->fp[i].sb_id;
4187
4188                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4189                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4190                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4191                                                     U_SB_ETH_RX_CQ_INDEX),
4192                         bp->rx_ticks/12);
4193                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4194                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4195                                                      U_SB_ETH_RX_CQ_INDEX),
4196                          bp->rx_ticks ? 0 : 1);
4197                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4198                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4199                                                      U_SB_ETH_RX_BD_INDEX),
4200                          bp->rx_ticks ? 0 : 1);
4201
4202                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4203                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4204                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4205                                                     C_SB_ETH_TX_CQ_INDEX),
4206                         bp->tx_ticks/12);
4207                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4208                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4209                                                      C_SB_ETH_TX_CQ_INDEX),
4210                          bp->tx_ticks ? 0 : 1);
4211         }
4212 }
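/* Editor's note: the /12 above suggests the HC timeout fields are
 * programmed in 12-usec units, with a tick value of 0 disabling
 * coalescing through the matching HC_DISABLE flag.  A hedged sketch of
 * that conversion (hypothetical helper, not part of the driver):
 */
static inline u8 bnx2x_usec_to_hc_units(u16 usec)
{
        return usec / 12;       /* caller also sets HC_DISABLE when usec == 0 */
}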
4213
4214 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4215                                        struct bnx2x_fastpath *fp, int last)
4216 {
4217         int i;
4218
4219         for (i = 0; i < last; i++) {
4220                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4221                 struct sk_buff *skb = rx_buf->skb;
4222
4223                 if (skb == NULL) {
4224                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4225                         continue;
4226                 }
4227
4228                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4229                         pci_unmap_single(bp->pdev,
4230                                          pci_unmap_addr(rx_buf, mapping),
4231                                          bp->rx_buf_size,
4232                                          PCI_DMA_FROMDEVICE);
4233
4234                 dev_kfree_skb(skb);
4235                 rx_buf->skb = NULL;
4236         }
4237 }
4238
4239 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4240 {
4241         int func = BP_FUNC(bp);
4242         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4243                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4244         u16 ring_prod, cqe_ring_prod;
4245         int i, j;
4246
4247         bp->rx_buf_size = bp->dev->mtu;
4248         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4249                 BCM_RX_ETH_PAYLOAD_ALIGN;
4250
4251         if (bp->flags & TPA_ENABLE_FLAG) {
4252                 DP(NETIF_MSG_IFUP,
4253                    "rx_buf_size %d  effective_mtu %d\n",
4254                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4255
4256                 for_each_queue(bp, j) {
4257                         struct bnx2x_fastpath *fp = &bp->fp[j];
4258
4259                         for (i = 0; i < max_agg_queues; i++) {
4260                                 fp->tpa_pool[i].skb =
4261                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4262                                 if (!fp->tpa_pool[i].skb) {
4263                                         BNX2X_ERR("Failed to allocate TPA "
4264                                                   "skb pool for queue[%d] - "
4265                                                   "disabling TPA on this "
4266                                                   "queue!\n", j);
4267                                         bnx2x_free_tpa_pool(bp, fp, i);
4268                                         fp->disable_tpa = 1;
4269                                         break;
4270                                 }
4271                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4272                                                    mapping, 0);
4274                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4275                         }
4276                 }
4277         }
4278
4279         for_each_queue(bp, j) {
4280                 struct bnx2x_fastpath *fp = &bp->fp[j];
4281
4282                 fp->rx_bd_cons = 0;
4283                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4284                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4285
4286                 /* "next page" elements initialization */
4287                 /* SGE ring */
4288                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4289                         struct eth_rx_sge *sge;
4290
4291                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4292                         sge->addr_hi =
4293                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4294                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4295                         sge->addr_lo =
4296                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4297                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4298                 }
4299
4300                 bnx2x_init_sge_ring_bit_mask(fp);
4301
4302                 /* RX BD ring */
4303                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4304                         struct eth_rx_bd *rx_bd;
4305
4306                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4307                         rx_bd->addr_hi =
4308                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4309                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4310                         rx_bd->addr_lo =
4311                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4312                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4313                 }
4314
4315                 /* CQ ring */
4316                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4317                         struct eth_rx_cqe_next_page *nextpg;
4318
4319                         nextpg = (struct eth_rx_cqe_next_page *)
4320                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4321                         nextpg->addr_hi =
4322                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4323                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4324                         nextpg->addr_lo =
4325                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4326                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4327                 }
4328
4329                 /* Allocate SGEs and initialize the ring elements */
4330                 for (i = 0, ring_prod = 0;
4331                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4332
4333                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4334                                 BNX2X_ERR("was only able to allocate "
4335                                           "%d rx sges\n", i);
4336                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4337                                 /* Cleanup already allocated elements */
4338                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4339                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4340                                 fp->disable_tpa = 1;
4341                                 ring_prod = 0;
4342                                 break;
4343                         }
4344                         ring_prod = NEXT_SGE_IDX(ring_prod);
4345                 }
4346                 fp->rx_sge_prod = ring_prod;
4347
4348                 /* Allocate BDs and initialize BD ring */
4349                 fp->rx_comp_cons = 0;
4350                 cqe_ring_prod = ring_prod = 0;
4351                 for (i = 0; i < bp->rx_ring_size; i++) {
4352                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4353                                 BNX2X_ERR("was only able to allocate "
4354                                           "%d rx skbs\n", i);
4355                                 bp->eth_stats.rx_skb_alloc_failed++;
4356                                 break;
4357                         }
4358                         ring_prod = NEXT_RX_IDX(ring_prod);
4359                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4360                         WARN_ON(ring_prod <= i);
4361                 }
4362
4363                 fp->rx_bd_prod = ring_prod;
4364                 /* must not have more available CQEs than BDs */
4365                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4366                                        cqe_ring_prod);
4367                 fp->rx_pkt = fp->rx_calls = 0;
4368
4369                 /* Warning!
4370                  * This will generate an interrupt (to the TSTORM);
4371                  * it must only be done after the chip is initialized.
4372                  */
4373                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4374                                      fp->rx_sge_prod);
4375                 if (j != 0)
4376                         continue;
4377
4378                 REG_WR(bp, BAR_USTRORM_INTMEM +
4379                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4380                        U64_LO(fp->rx_comp_mapping));
4381                 REG_WR(bp, BAR_USTRORM_INTMEM +
4382                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4383                        U64_HI(fp->rx_comp_mapping));
4384         }
4385 }
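/* Editor's note: the three "next page" loops above share one pattern -
 * the trailing element(s) of each ring page carry the DMA address of the
 * following page, wrapping back to page 0 at the end.  A generic sketch
 * under that assumption (hypothetical helper, not driver code):
 */
static void bnx2x_chain_page(__le32 *addr_hi, __le32 *addr_lo,
                             dma_addr_t base, int page, int num_pages)
{
        dma_addr_t next = base + BCM_PAGE_SIZE * ((page + 1) % num_pages);

        *addr_hi = cpu_to_le32(U64_HI(next));
        *addr_lo = cpu_to_le32(U64_LO(next));
}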
4386
4387 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4388 {
4389         int i, j;
4390
4391         for_each_queue(bp, j) {
4392                 struct bnx2x_fastpath *fp = &bp->fp[j];
4393
4394                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4395                         struct eth_tx_bd *tx_bd =
4396                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4397
4398                         tx_bd->addr_hi =
4399                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4400                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4401                         tx_bd->addr_lo =
4402                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4403                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4404                 }
4405
4406                 fp->tx_pkt_prod = 0;
4407                 fp->tx_pkt_cons = 0;
4408                 fp->tx_bd_prod = 0;
4409                 fp->tx_bd_cons = 0;
4410                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4411                 fp->tx_pkt = 0;
4412         }
4413 }
4414
4415 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4416 {
4417         int func = BP_FUNC(bp);
4418
4419         spin_lock_init(&bp->spq_lock);
4420
4421         bp->spq_left = MAX_SPQ_PENDING;
4422         bp->spq_prod_idx = 0;
4423         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4424         bp->spq_prod_bd = bp->spq;
4425         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4426
4427         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4428                U64_LO(bp->spq_mapping));
4429         REG_WR(bp,
4430                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4431                U64_HI(bp->spq_mapping));
4432
4433         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4434                bp->spq_prod_idx);
4435 }
4436
4437 static void bnx2x_init_context(struct bnx2x *bp)
4438 {
4439         int i;
4440
4441         for_each_queue(bp, i) {
4442                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4443                 struct bnx2x_fastpath *fp = &bp->fp[i];
4444                 u8 sb_id = FP_SB_ID(fp);
4445
4446                 context->xstorm_st_context.tx_bd_page_base_hi =
4447                                                 U64_HI(fp->tx_desc_mapping);
4448                 context->xstorm_st_context.tx_bd_page_base_lo =
4449                                                 U64_LO(fp->tx_desc_mapping);
4450                 context->xstorm_st_context.db_data_addr_hi =
4451                                                 U64_HI(fp->tx_prods_mapping);
4452                 context->xstorm_st_context.db_data_addr_lo =
4453                                                 U64_LO(fp->tx_prods_mapping);
4454                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4455                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4456
4457                 context->ustorm_st_context.common.sb_index_numbers =
4458                                                 BNX2X_RX_SB_INDEX_NUM;
4459                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4460                 context->ustorm_st_context.common.status_block_id = sb_id;
4461                 context->ustorm_st_context.common.flags =
4462                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4463                 context->ustorm_st_context.common.mc_alignment_size =
4464                         BCM_RX_ETH_PAYLOAD_ALIGN;
4465                 context->ustorm_st_context.common.bd_buff_size =
4466                                                 bp->rx_buf_size;
4467                 context->ustorm_st_context.common.bd_page_base_hi =
4468                                                 U64_HI(fp->rx_desc_mapping);
4469                 context->ustorm_st_context.common.bd_page_base_lo =
4470                                                 U64_LO(fp->rx_desc_mapping);
4471                 if (!fp->disable_tpa) {
4472                         context->ustorm_st_context.common.flags |=
4473                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4474                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4475                         context->ustorm_st_context.common.sge_buff_size =
4476                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4477                         context->ustorm_st_context.common.sge_page_base_hi =
4478                                                 U64_HI(fp->rx_sge_mapping);
4479                         context->ustorm_st_context.common.sge_page_base_lo =
4480                                                 U64_LO(fp->rx_sge_mapping);
4481                 }
4482
4483                 context->cstorm_st_context.sb_index_number =
4484                                                 C_SB_ETH_TX_CQ_INDEX;
4485                 context->cstorm_st_context.status_block_id = sb_id;
4486
4487                 context->xstorm_ag_context.cdu_reserved =
4488                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4489                                                CDU_REGION_NUMBER_XCM_AG,
4490                                                ETH_CONNECTION_TYPE);
4491                 context->ustorm_ag_context.cdu_usage =
4492                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4493                                                CDU_REGION_NUMBER_UCM_AG,
4494                                                ETH_CONNECTION_TYPE);
4495         }
4496 }
4497
4498 static void bnx2x_init_ind_table(struct bnx2x *bp)
4499 {
4500         int port = BP_PORT(bp);
4501         int i;
4502
4503         if (!is_multi(bp))
4504                 return;
4505
4506         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4507         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4508                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4509                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4510                         i % bp->num_queues);
4511
4512         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4513 }
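/* Editor's note: illustrative restatement of the fill above (hypothetical
 * helper, not part of the driver): entry i of the indirection table is
 * mapped round-robin to queue (i % num_queues), so RSS hash results
 * spread evenly across the active queues.
 */
static void bnx2x_fill_ind_table(u8 *table, int size, int num_queues)
{
        int i;

        for (i = 0; i < size; i++)
                table[i] = i % num_queues;
}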
4514
4515 static void bnx2x_set_client_config(struct bnx2x *bp)
4516 {
4517         struct tstorm_eth_client_config tstorm_client = {0};
4518         int port = BP_PORT(bp);
4519         int i;
4520
4521         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4522         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4523         tstorm_client.config_flags =
4524                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4525 #ifdef BCM_VLAN
4526         if (bp->rx_mode && bp->vlgrp) {
4527                 tstorm_client.config_flags |=
4528                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4529                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4530         }
4531 #endif
4532
4533         if (bp->flags & TPA_ENABLE_FLAG) {
4534                 tstorm_client.max_sges_for_packet =
4535                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4536                 tstorm_client.max_sges_for_packet =
4537                         ((tstorm_client.max_sges_for_packet +
4538                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4539                         PAGES_PER_SGE_SHIFT;
4540
4541                 tstorm_client.config_flags |=
4542                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4543         }
4544
4545         for_each_queue(bp, i) {
4546                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4547                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4548                        ((u32 *)&tstorm_client)[0]);
4549                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4550                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4551                        ((u32 *)&tstorm_client)[1]);
4552         }
4553
4554         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4555            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4556 }
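/* Editor's note: worked example of the max_sges_for_packet math above
 * (values assumed for illustration): with mtu = 9000, a 4K page and
 * PAGES_PER_SGE = 2, BCM_PAGE_ALIGN(9000) >> 12 = 3 pages; rounding up
 * to a multiple of 2 gives 4, and 4 >> PAGES_PER_SGE_SHIFT (= 1) yields
 * 2 SGE elements per packet.  The mask trick requires PAGES_PER_SGE to
 * be a power of two.
 */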
4557
4558 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4559 {
4560         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4561         int mode = bp->rx_mode;
4562         int mask = (1 << BP_L_ID(bp));
4563         int func = BP_FUNC(bp);
4564         int i;
4565
4566         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4567
4568         switch (mode) {
4569         case BNX2X_RX_MODE_NONE: /* no Rx */
4570                 tstorm_mac_filter.ucast_drop_all = mask;
4571                 tstorm_mac_filter.mcast_drop_all = mask;
4572                 tstorm_mac_filter.bcast_drop_all = mask;
4573                 break;
4574         case BNX2X_RX_MODE_NORMAL:
4575                 tstorm_mac_filter.bcast_accept_all = mask;
4576                 break;
4577         case BNX2X_RX_MODE_ALLMULTI:
4578                 tstorm_mac_filter.mcast_accept_all = mask;
4579                 tstorm_mac_filter.bcast_accept_all = mask;
4580                 break;
4581         case BNX2X_RX_MODE_PROMISC:
4582                 tstorm_mac_filter.ucast_accept_all = mask;
4583                 tstorm_mac_filter.mcast_accept_all = mask;
4584                 tstorm_mac_filter.bcast_accept_all = mask;
4585                 break;
4586         default:
4587                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4588                 break;
4589         }
4590
4591         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4592                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4593                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4594                        ((u32 *)&tstorm_mac_filter)[i]);
4595
4596 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4597                    ((u32 *)&tstorm_mac_filter)[i]); */
4598         }
4599
4600         if (mode != BNX2X_RX_MODE_NONE)
4601                 bnx2x_set_client_config(bp);
4602 }
4603
4604 static void bnx2x_init_internal_common(struct bnx2x *bp)
4605 {
4606         int i;
4607
4608         if (bp->flags & TPA_ENABLE_FLAG) {
4609                 struct tstorm_eth_tpa_exist tpa = {0};
4610
4611                 tpa.tpa_exist = 1;
4612
4613                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4614                        ((u32 *)&tpa)[0]);
4615                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4616                        ((u32 *)&tpa)[1]);
4617         }
4618
4619         /* Zero this manually as its initialization is
4620            currently missing in the initTool */
4621         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4622                 REG_WR(bp, BAR_USTRORM_INTMEM +
4623                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4624 }
4625
4626 static void bnx2x_init_internal_port(struct bnx2x *bp)
4627 {
4628         int port = BP_PORT(bp);
4629
4630         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4631         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4632         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4634 }
4635
4636 static void bnx2x_init_internal_func(struct bnx2x *bp)
4637 {
4638         struct tstorm_eth_function_common_config tstorm_config = {0};
4639         struct stats_indication_flags stats_flags = {0};
4640         int port = BP_PORT(bp);
4641         int func = BP_FUNC(bp);
4642         int i;
4643         u16 max_agg_size;
4644
4645         if (is_multi(bp)) {
4646                 tstorm_config.config_flags = MULTI_FLAGS;
4647                 tstorm_config.rss_result_mask = MULTI_MASK;
4648         }
4649
4650         tstorm_config.leading_client_id = BP_L_ID(bp);
4651
4652         REG_WR(bp, BAR_TSTRORM_INTMEM +
4653                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4654                (*(u32 *)&tstorm_config));
4655
4656         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4657         bnx2x_set_storm_rx_mode(bp);
4658
4659         /* reset xstorm per client statistics */
4660         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4661                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4662                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4663                        i*4, 0);
4664         }
4665         /* reset tstorm per client statistics */
4666         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4667                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4668                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4669                        i*4, 0);
4670         }
4671
4672         /* Init statistics related context */
4673         stats_flags.collect_eth = 1;
4674
4675         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4676                ((u32 *)&stats_flags)[0]);
4677         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4678                ((u32 *)&stats_flags)[1]);
4679
4680         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4681                ((u32 *)&stats_flags)[0]);
4682         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4683                ((u32 *)&stats_flags)[1]);
4684
4685         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4686                ((u32 *)&stats_flags)[0]);
4687         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4688                ((u32 *)&stats_flags)[1]);
4689
4690         REG_WR(bp, BAR_XSTRORM_INTMEM +
4691                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4692                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4693         REG_WR(bp, BAR_XSTRORM_INTMEM +
4694                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4695                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4696
4697         REG_WR(bp, BAR_TSTRORM_INTMEM +
4698                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4699                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4700         REG_WR(bp, BAR_TSTRORM_INTMEM +
4701                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4702                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4703
4704         if (CHIP_IS_E1H(bp)) {
4705                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4706                         IS_E1HMF(bp));
4707                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4708                         IS_E1HMF(bp));
4709                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4710                         IS_E1HMF(bp));
4711                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4712                         IS_E1HMF(bp));
4713
4714                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4715                          bp->e1hov);
4716         }
4717
4718         /* Init CQ ring mapping and aggregation size */
4719         max_agg_size = min((u32)(bp->rx_buf_size +
4720                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4721                            (u32)0xffff);
4722         for_each_queue(bp, i) {
4723                 struct bnx2x_fastpath *fp = &bp->fp[i];
4724
4725                 REG_WR(bp, BAR_USTRORM_INTMEM +
4726                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4727                        U64_LO(fp->rx_comp_mapping));
4728                 REG_WR(bp, BAR_USTRORM_INTMEM +
4729                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4730                        U64_HI(fp->rx_comp_mapping));
4731
4732                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4733                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4734                          max_agg_size);
4735         }
4736 }
4737
4738 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4739 {
4740         switch (load_code) {
4741         case FW_MSG_CODE_DRV_LOAD_COMMON:
4742                 bnx2x_init_internal_common(bp);
4743                 /* no break */
4744
4745         case FW_MSG_CODE_DRV_LOAD_PORT:
4746                 bnx2x_init_internal_port(bp);
4747                 /* no break */
4748
4749         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4750                 bnx2x_init_internal_func(bp);
4751                 break;
4752
4753         default:
4754                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4755                 break;
4756         }
4757 }
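/* Editor's note: the fall-through above is deliberate and encodes a
 * superset relation - a COMMON load also runs the PORT and FUNCTION
 * init, a PORT load also runs the FUNCTION init, and only a FUNCTION
 * load runs the last stage alone.
 */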
4758
4759 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4760 {
4761         int i;
4762
4763         for_each_queue(bp, i) {
4764                 struct bnx2x_fastpath *fp = &bp->fp[i];
4765
4766                 fp->bp = bp;
4767                 fp->state = BNX2X_FP_STATE_CLOSED;
4768                 fp->index = i;
4769                 fp->cl_id = BP_L_ID(bp) + i;
4770                 fp->sb_id = fp->cl_id;
4771                 DP(NETIF_MSG_IFUP,
4772                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4773                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4774                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4775                               FP_SB_ID(fp));
4776                 bnx2x_update_fpsb_idx(fp);
4777         }
4778
4779         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4780                           DEF_SB_ID);
4781         bnx2x_update_dsb_idx(bp);
4782         bnx2x_update_coalesce(bp);
4783         bnx2x_init_rx_rings(bp);
4784         bnx2x_init_tx_ring(bp);
4785         bnx2x_init_sp_ring(bp);
4786         bnx2x_init_context(bp);
4787         bnx2x_init_internal(bp, load_code);
4788         bnx2x_init_ind_table(bp);
4789         bnx2x_int_enable(bp);
4790 }
4791
4792 /* end of nic init */
4793
4794 /*
4795  * gzip service functions
4796  */
4797
4798 static int bnx2x_gunzip_init(struct bnx2x *bp)
4799 {
4800         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4801                                               &bp->gunzip_mapping);
4802         if (bp->gunzip_buf  == NULL)
4803                 goto gunzip_nomem1;
4804
4805         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4806         if (bp->strm  == NULL)
4807                 goto gunzip_nomem2;
4808
4809         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4810                                       GFP_KERNEL);
4811         if (bp->strm->workspace == NULL)
4812                 goto gunzip_nomem3;
4813
4814         return 0;
4815
4816 gunzip_nomem3:
4817         kfree(bp->strm);
4818         bp->strm = NULL;
4819
4820 gunzip_nomem2:
4821         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4822                             bp->gunzip_mapping);
4823         bp->gunzip_buf = NULL;
4824
4825 gunzip_nomem1:
4826         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer"
4827                " for decompression\n", bp->dev->name);
4828         return -ENOMEM;
4829 }
4830
4831 static void bnx2x_gunzip_end(struct bnx2x *bp)
4832 {
4833         kfree(bp->strm->workspace);
4834
4835         kfree(bp->strm);
4836         bp->strm = NULL;
4837
4838         if (bp->gunzip_buf) {
4839                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4840                                     bp->gunzip_mapping);
4841                 bp->gunzip_buf = NULL;
4842         }
4843 }
4844
4845 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4846 {
4847         int n, rc;
4848
4849         /* check gzip header */
4850         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4851                 return -EINVAL;
4852
4853         n = 10;
4854
4855 #define FNAME                           0x8
4856
4857         if (zbuf[3] & FNAME)
4858                 while ((n < len) && (zbuf[n++] != 0));
4859
4860         bp->strm->next_in = zbuf + n;
4861         bp->strm->avail_in = len - n;
4862         bp->strm->next_out = bp->gunzip_buf;
4863         bp->strm->avail_out = FW_BUF_SIZE;
4864
4865         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4866         if (rc != Z_OK)
4867                 return rc;
4868
4869         rc = zlib_inflate(bp->strm, Z_FINISH);
4870         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4871                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4872                        bp->dev->name, bp->strm->msg);
4873
4874         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4875         if (bp->gunzip_outlen & 0x3)
4876                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4877                                     " gunzip_outlen (%d) not aligned\n",
4878                        bp->dev->name, bp->gunzip_outlen);
4879         bp->gunzip_outlen >>= 2;
4880
4881         zlib_inflateEnd(bp->strm);
4882
4883         if (rc == Z_STREAM_END)
4884                 return 0;
4885
4886         return rc;
4887 }
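/* Editor's note: a minimal userspace analogue of the routine above,
 * using plain zlib rather than the kernel wrappers (illustrative sketch
 * only, kept out of the build).  Negative windowBits requests a raw
 * DEFLATE stream, so the 10-byte gzip header - plus the NUL-terminated
 * file name when FNAME is set - must be skipped by hand, as above.
 */
#if 0
#include <zlib.h>

static int gunzip_raw(unsigned char *zbuf, int len,
                      unsigned char *out, int out_len)
{
        z_stream strm = {0};
        int n = 10, rc;

        /* gzip magic plus deflate method */
        if ((len < 10) || (zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) ||
            (zbuf[2] != Z_DEFLATED))
                return -1;

        if (zbuf[3] & 0x8)                      /* FNAME flag */
                while ((n < len) && (zbuf[n++] != 0))
                        ;

        strm.next_in = zbuf + n;
        strm.avail_in = len - n;
        strm.next_out = out;
        strm.avail_out = out_len;

        if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
                return -1;
        rc = inflate(&strm, Z_FINISH);
        inflateEnd(&strm);

        return (rc == Z_STREAM_END) ? 0 : -1;
}
#endif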
4888
4889 /* nic load/unload */
4890
4891 /*
4892  * General service functions
4893  */
4894
4895 /* send a NIG loopback debug packet */
4896 static void bnx2x_lb_pckt(struct bnx2x *bp)
4897 {
4898         u32 wb_write[3];
4899
4900         /* Ethernet source and destination addresses */
4901         wb_write[0] = 0x55555555;
4902         wb_write[1] = 0x55555555;
4903         wb_write[2] = 0x20;             /* SOP */
4904         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4905
4906         /* NON-IP protocol */
4907         wb_write[0] = 0x09000000;
4908         wb_write[1] = 0x55555555;
4909         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4910         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4911 }
4912
4913 /* Some of the internal memories
4914  * are not directly readable from the driver;
4915  * to test them we send debug packets.
4916  */
4917 static int bnx2x_int_mem_test(struct bnx2x *bp)
4918 {
4919         int factor;
4920         int count, i;
4921         u32 val = 0;
4922
4923         if (CHIP_REV_IS_FPGA(bp))
4924                 factor = 120;
4925         else if (CHIP_REV_IS_EMUL(bp))
4926                 factor = 200;
4927         else
4928                 factor = 1;
4929
4930         DP(NETIF_MSG_HW, "start part1\n");
4931
4932         /* Disable inputs of parser neighbor blocks */
4933         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4934         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4935         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4936         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4937
4938         /*  Write 0 to parser credits for CFC search request */
4939         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4940
4941         /* send Ethernet packet */
4942         bnx2x_lb_pckt(bp);
4943
4944         /* TODO: do we need to reset the NIG statistics? */
4945         /* Wait until NIG register shows 1 packet of size 0x10 */
4946         count = 1000 * factor;
4947         while (count) {
4948
4949                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4950                 val = *bnx2x_sp(bp, wb_data[0]);
4951                 if (val == 0x10)
4952                         break;
4953
4954                 msleep(10);
4955                 count--;
4956         }
4957         if (val != 0x10) {
4958                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4959                 return -1;
4960         }
4961
4962         /* Wait until PRS register shows 1 packet */
4963         count = 1000 * factor;
4964         while (count) {
4965                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4966                 if (val == 1)
4967                         break;
4968
4969                 msleep(10);
4970                 count--;
4971         }
4972         if (val != 0x1) {
4973                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4974                 return -2;
4975         }
4976
4977         /* Reset and init BRB, PRS */
4978         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4979         msleep(50);
4980         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4981         msleep(50);
4982         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4983         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4984
4985         DP(NETIF_MSG_HW, "part2\n");
4986
4987         /* Disable inputs of parser neighbor blocks */
4988         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4989         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4990         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4991         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4992
4993         /* Write 0 to parser credits for CFC search request */
4994         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4995
4996         /* send 10 Ethernet packets */
4997         for (i = 0; i < 10; i++)
4998                 bnx2x_lb_pckt(bp);
4999
5000         /* Wait until NIG register shows 10 + 1
5001            packets of size 11*0x10 = 0xb0 */
5002         count = 1000 * factor;
5003         while (count) {
5004
5005                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5006                 val = *bnx2x_sp(bp, wb_data[0]);
5007                 if (val == 0xb0)
5008                         break;
5009
5010                 msleep(10);
5011                 count--;
5012         }
5013         if (val != 0xb0) {
5014                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5015                 return -3;
5016         }
5017
5018         /* Wait until PRS register shows 2 packets */
5019         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020         if (val != 2)
5021                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5022
5023         /* Write 1 to parser credits for CFC search request */
5024         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5025
5026         /* Wait until PRS register shows 3 packets */
5027         msleep(10 * factor);
5028         /* re-read the PRS packet counter */
5029         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5030         if (val != 3)
5031                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5032
5033         /* clear NIG EOP FIFO */
5034         for (i = 0; i < 11; i++)
5035                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5036         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5037         if (val != 1) {
5038                 BNX2X_ERR("clear of NIG failed\n");
5039                 return -4;
5040         }
5041
5042         /* Reset and init BRB, PRS, NIG */
5043         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5044         msleep(50);
5045         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5046         msleep(50);
5047         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5048         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5049 #ifndef BCM_ISCSI
5050         /* set NIC mode */
5051         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5052 #endif
5053
5054         /* Enable inputs of parser neighbor blocks */
5055         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5056         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5057         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5058         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5059
5060         DP(NETIF_MSG_HW, "done\n");
5061
5062         return 0; /* OK */
5063 }
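/* Editor's note: the wait loops above all follow one pattern - poll a
 * value until it matches the expected count, sleeping between reads,
 * with the retry budget scaled up for FPGA/emulation.  Hypothetical
 * helper, not driver code:
 */
static int bnx2x_poll_val(struct bnx2x *bp, u32 reg, u32 expected, int count)
{
        while (count--) {
                if (REG_RD(bp, reg) == expected)
                        return 0;
                msleep(10);
        }
        return -ETIMEDOUT;
}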
5064
5065 static void enable_blocks_attention(struct bnx2x *bp)
5066 {
5067         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5068         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5069         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5070         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5071         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5072         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5073         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5074         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5075         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5076 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5077 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5078         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5079         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5080         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5081 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5082 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5083         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5084         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5085         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5086         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5087 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5088 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5089         if (CHIP_REV_IS_FPGA(bp))
5090                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5091         else
5092                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5093         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5094         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5095         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5096 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5097 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5098         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5099         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5100 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5101         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5102 }
5103
5104
5105 static int bnx2x_init_common(struct bnx2x *bp)
5106 {
5107         u32 val, i;
5108
5109         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5110
5111         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5112         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5113
5114         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5115         if (CHIP_IS_E1H(bp))
5116                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5117
5118         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5119         msleep(30);
5120         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5121
5122         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5123         if (CHIP_IS_E1(bp)) {
5124                 /* enable HW interrupt from PXP on USDM overflow
5125                    (bit 16 on INT_MASK_0) */
5126                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5127         }
5128
5129         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5130         bnx2x_init_pxp(bp);
5131
5132 #ifdef __BIG_ENDIAN
5133         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5134         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5135         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5136         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5137         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5138         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5139
5140 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5141         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5142         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5143         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5144         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5145 #endif
5146
5147         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5148 #ifdef BCM_ISCSI
5149         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5150         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5151         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5152 #endif
5153
5154         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5155                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5156
5157         /* let the HW do its magic ... */
5158         msleep(100);
5159         /* finish PXP init */
5160         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5161         if (val != 1) {
5162                 BNX2X_ERR("PXP2 CFG failed\n");
5163                 return -EBUSY;
5164         }
5165         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5166         if (val != 1) {
5167                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5168                 return -EBUSY;
5169         }
5170
5171         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5172         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5173
5174         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5175
5176         /* clean the DMAE memory */
5177         bp->dmae_ready = 1;
5178         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5179
5180         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5181         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5182         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5183         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5184
5185         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5186         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5187         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5188         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5189
5190         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5191         /* soft reset pulse */
5192         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5193         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5194
5195 #ifdef BCM_ISCSI
5196         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5197 #endif
5198
5199         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5200         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5201         if (!CHIP_REV_IS_SLOW(bp)) {
5202                 /* enable hw interrupt from doorbell Q */
5203                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5204         }
5205
5206         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5207         if (CHIP_REV_IS_SLOW(bp)) {
5208                 /* fix for emulation and FPGA: effectively disable pause */
5209                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5210                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5211                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5212                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5213         }
5214
5215         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5216         /* set NIC mode */
5217         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5218         if (CHIP_IS_E1H(bp))
5219                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5220
5221         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5222         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5223         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5224         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5225
5226         if (CHIP_IS_E1H(bp)) {
5227                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5228                                 STORM_INTMEM_SIZE_E1H/2);
5229                 bnx2x_init_fill(bp,
5230                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231                                 0, STORM_INTMEM_SIZE_E1H/2);
5232                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5233                                 STORM_INTMEM_SIZE_E1H/2);
5234                 bnx2x_init_fill(bp,
5235                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5236                                 0, STORM_INTMEM_SIZE_E1H/2);
5237                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5238                                 STORM_INTMEM_SIZE_E1H/2);
5239                 bnx2x_init_fill(bp,
5240                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5241                                 0, STORM_INTMEM_SIZE_E1H/2);
5242                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5243                                 STORM_INTMEM_SIZE_E1H/2);
5244                 bnx2x_init_fill(bp,
5245                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5246                                 0, STORM_INTMEM_SIZE_E1H/2);
5247         } else { /* E1 */
5248                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5249                                 STORM_INTMEM_SIZE_E1);
5250                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5251                                 STORM_INTMEM_SIZE_E1);
5252                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5253                                 STORM_INTMEM_SIZE_E1);
5254                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5255                                 STORM_INTMEM_SIZE_E1);
5256         }
5257
5258         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5259         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5260         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5261         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5262
5263         /* sync semi rtc */
5264         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5265                0x80000000);
5266         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5267                0x80000000);
5268
5269         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5270         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5271         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5272
5273         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5274         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5275                 REG_WR(bp, i, 0xc0cac01a);
5276                 /* TODO: replace with something meaningful */
5277         }
5278         if (CHIP_IS_E1H(bp))
5279                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5280         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5281
5282         if (sizeof(union cdu_context) != 1024)
5283                 /* we currently assume that a context is 1024 bytes */
5284                 printk(KERN_ALERT PFX "please adjust the size of"
5285                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5286
5287         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5288         val = (4 << 24) + (0 << 12) + 1024;
5289         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5290         if (CHIP_IS_E1(bp)) {
5291                 /* !!! fix PXP client credit until the Excel sheet is updated */
5292                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5293                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5294         }
5295
5296         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5297         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5298
5299         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5300         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5301
5302         /* PXPCS COMMON comes here */
5303         /* Reset PCIE errors for debug */
5304         REG_WR(bp, 0x2814, 0xffffffff);
5305         REG_WR(bp, 0x3820, 0xffffffff);
5306
5307         /* EMAC0 COMMON comes here */
5308         /* EMAC1 COMMON comes here */
5309         /* DBU COMMON comes here */
5310         /* DBG COMMON comes here */
5311
5312         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5313         if (CHIP_IS_E1H(bp)) {
5314                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5315                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5316         }
5317
5318         if (CHIP_REV_IS_SLOW(bp))
5319                 msleep(200);
5320
5321         /* finish CFC init */
5322         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5323         if (val != 1) {
5324                 BNX2X_ERR("CFC LL_INIT failed\n");
5325                 return -EBUSY;
5326         }
5327         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5328         if (val != 1) {
5329                 BNX2X_ERR("CFC AC_INIT failed\n");
5330                 return -EBUSY;
5331         }
5332         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5333         if (val != 1) {
5334                 BNX2X_ERR("CFC CAM_INIT failed\n");
5335                 return -EBUSY;
5336         }
5337         REG_WR(bp, CFC_REG_DEBUG0, 0);
5338
5339         /* read NIG statistic
5340            to see if this is our first load since power-up */
5341         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5342         val = *bnx2x_sp(bp, wb_data[0]);
5343
5344         /* do internal memory self test */
5345         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5346                 BNX2X_ERR("internal mem self test failed\n");
5347                 return -EBUSY;
5348         }
5349
5350         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5351         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5352         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5353                 /* Fan failure is indicated by SPIO 5 */
5354                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5355                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5356
5357                 /* set to active low mode */
5358                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5359                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5360                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5361                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5362
5363                 /* enable interrupt to signal the IGU */
5364                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5365                 val |= (1 << MISC_REGISTERS_SPIO_5);
5366                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5367                 break;
5368
5369         default:
5370                 break;
5371         }
5372
5373         /* clear PXP2 attentions */
5374         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5375
5376         enable_blocks_attention(bp);
5377
5378         if (!BP_NOMCP(bp)) {
5379                 bnx2x_acquire_phy_lock(bp);
5380                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5381                 bnx2x_release_phy_lock(bp);
5382         } else
5383                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5384
5385         return 0;
5386 }
5387
5388 static int bnx2x_init_port(struct bnx2x *bp)
5389 {
5390         int port = BP_PORT(bp);
5391         u32 val;
5392
5393         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5394
5395         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5396
5397         /* Port PXP comes here */
5398         /* Port PXP2 comes here */
5399 #ifdef BCM_ISCSI
5400         /* Port0  1
5401          * Port1  385 */
5402         i++;
5403         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5404         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5405         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5406         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5407
5408         /* Port0  2
5409          * Port1  386 */
5410         i++;
5411         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5412         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5413         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5414         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5415
5416         /* Port0  3
5417          * Port1  387 */
5418         i++;
5419         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5420         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5421         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5422         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5423 #endif
5424         /* Port CMs come here */
5425
5426         /* Port QM comes here */
5427 #ifdef BCM_ISCSI
5428         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5429         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5430
5431         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5432                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5433 #endif
5434         /* Port DQ comes here */
5435         /* Port BRB1 comes here */
5436         /* Port PRS comes here */
5437         /* Port TSDM comes here */
5438         /* Port CSDM comes here */
5439         /* Port USDM comes here */
5440         /* Port XSDM comes here */
5441         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5442                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5443         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5444                              port ? USEM_PORT1_END : USEM_PORT0_END);
5445         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5446                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5447         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5448                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5449         /* Port UPB comes here */
5450         /* Port XPB comes here */
5451
5452         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5453                              port ? PBF_PORT1_END : PBF_PORT0_END);
5454
5455         /* configure PBF to work without PAUSE, mtu 9000 */
5456         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5457
5458         /* update threshold */
5459         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5460         /* update init credit */
5461         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
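        /* A sketch of the arithmetic in the two writes above: 9040/16
         * = 565 credits of 16 bytes each, enough for one mtu-9000
         * frame (9040 presumably being the mtu plus margin); the
         * initial credit is then 565 + 553 - 22 = 1096 units.
         */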
5462
5463         /* probe changes */
5464         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5465         msleep(5);
5466         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5467
5468 #ifdef BCM_ISCSI
5469         /* tell the searcher where the T2 table is */
5470         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5471
5472         wb_write[0] = U64_LO(bp->t2_mapping);
5473         wb_write[1] = U64_HI(bp->t2_mapping);
5474         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5475         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5476         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5477         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5478
5479         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5480         /* Port SRCH comes here */
5481 #endif
5482         /* Port CDU comes here */
5483         /* Port CFC comes here */
5484
5485         if (CHIP_IS_E1(bp)) {
5486                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5487                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5488         }
5489         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5490                              port ? HC_PORT1_END : HC_PORT0_END);
5491
5492         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5493                                     MISC_AEU_PORT0_START,
5494                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5495         /* init aeu_mask_attn_func_0/1:
5496          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5497          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5498          *             bits 4-7 are used for "per vn group attention" */
5499         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5500                (IS_E1HMF(bp) ? 0xF7 : 0x7));
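        /* Spelling out the two mask values from the comment above:
         * 0x07 = 00000111b keeps only attention bits 0-2 in use (SF),
         * while 0xF7 = 11110111b additionally opens bits 4-7 for the
         * per-vn group attentions and leaves bit 3 masked (MF).
         */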
5501
5502         /* Port PXPCS comes here */
5503         /* Port EMAC0 comes here */
5504         /* Port EMAC1 comes here */
5505         /* Port DBU comes here */
5506         /* Port DBG comes here */
5507         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5508                              port ? NIG_PORT1_END : NIG_PORT0_END);
5509
5510         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5511
5512         if (CHIP_IS_E1H(bp)) {
5513                 u32 wsum;
5514                 struct cmng_struct_per_port m_cmng_port;
5515                 int vn;
5516
5517                 /* 0x2 disable e1hov, 0x1 enable */
5518                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5519                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5520
5521                 /* Init RATE SHAPING and FAIRNESS contexts.
5522                    Initialize as if there is 10G link. */
5523                 wsum = bnx2x_calc_vn_wsum(bp);
5524                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5525                 if (IS_E1HMF(bp))
5526                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5527                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5528                                         wsum, 10000, &m_cmng_port);
5529         }
5530
5531         /* Port MCP comes here */
5532         /* Port DMAE comes here */
5533
5534         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5535         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5536         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5537                 /* add SPIO 5 to group 0 */
5538                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5539                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5540                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5541                 break;
5542
5543         default:
5544                 break;
5545         }
5546
5547         bnx2x__link_reset(bp);
5548
5549         return 0;
5550 }
5551
5552 #define ILT_PER_FUNC            (768/2)
5553 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5554 /* the phys address is shifted right 12 bits and a valid bit (1)
5555    is set in the 53rd bit;
5556    then, since this is a wide register(TM),
5557    we split it into two 32 bit writes
5558  */
5559 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5560 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5561 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5562 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
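/* Worked example for the macros above, assuming a DMA address of
 * 0x0000001234567000: the address >> 12 is 0x1234567, so
 * ONCHIP_ADDR1() yields 0x01234567 and ONCHIP_ADDR2() yields
 * 0x00100000 (just the valid bit, since address bits 44+ are zero);
 * the pair is then written as the two halves of the wide register.
 */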
5563
5564 #define CNIC_ILT_LINES          0
5565
5566 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5567 {
5568         int reg;
5569
5570         if (CHIP_IS_E1H(bp))
5571                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5572         else /* E1 */
5573                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5574
5575         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5576 }
5577
5578 static int bnx2x_init_func(struct bnx2x *bp)
5579 {
5580         int port = BP_PORT(bp);
5581         int func = BP_FUNC(bp);
5582         int i;
5583
5584         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5585
5586         i = FUNC_ILT_BASE(func);
5587
5588         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5589         if (CHIP_IS_E1H(bp)) {
5590                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5591                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5592         } else /* E1 */
5593                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5594                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5595
5596
5597         if (CHIP_IS_E1H(bp)) {
5598                 for (i = 0; i < 9; i++)
5599                         bnx2x_init_block(bp,
5600                                          cm_start[func][i], cm_end[func][i]);
5601
5602                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5603                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5604         }
5605
5606         /* HC init per function */
5607         if (CHIP_IS_E1H(bp)) {
5608                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5609
5610                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5611                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5612         }
5613         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5614
5615         if (CHIP_IS_E1H(bp))
5616                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5617
5618         /* Reset PCIE errors for debug */
5619         REG_WR(bp, 0x2114, 0xffffffff);
5620         REG_WR(bp, 0x2120, 0xffffffff);
5621
5622         return 0;
5623 }
5624
5625 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5626 {
5627         int i, rc = 0;
5628
5629         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5630            BP_FUNC(bp), load_code);
5631
5632         bp->dmae_ready = 0;
5633         mutex_init(&bp->dmae_mutex);
5634         bnx2x_gunzip_init(bp);
5635
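        /* Note that the cases below deliberately cascade: a COMMON
         * load falls through to the PORT init, which falls through to
         * the FUNCTION init, so the first driver instance on the chip
         * initializes all three levels and later instances do less.
         */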
5636         switch (load_code) {
5637         case FW_MSG_CODE_DRV_LOAD_COMMON:
5638                 rc = bnx2x_init_common(bp);
5639                 if (rc)
5640                         goto init_hw_err;
5641                 /* no break */
5642
5643         case FW_MSG_CODE_DRV_LOAD_PORT:
5644                 bp->dmae_ready = 1;
5645                 rc = bnx2x_init_port(bp);
5646                 if (rc)
5647                         goto init_hw_err;
5648                 /* no break */
5649
5650         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5651                 bp->dmae_ready = 1;
5652                 rc = bnx2x_init_func(bp);
5653                 if (rc)
5654                         goto init_hw_err;
5655                 break;
5656
5657         default:
5658                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5659                 break;
5660         }
5661
5662         if (!BP_NOMCP(bp)) {
5663                 int func = BP_FUNC(bp);
5664
5665                 bp->fw_drv_pulse_wr_seq =
5666                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5667                                  DRV_PULSE_SEQ_MASK);
5668                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5669                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5670                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5671         } else
5672                 bp->func_stx = 0;
5673
5674         /* this needs to be done before gunzip end */
5675         bnx2x_zero_def_sb(bp);
5676         for_each_queue(bp, i)
5677                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5678
5679 init_hw_err:
5680         bnx2x_gunzip_end(bp);
5681
5682         return rc;
5683 }
5684
5685 /* send the MCP a request, block until there is a reply */
5686 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5687 {
5688         int func = BP_FUNC(bp);
5689         u32 seq = ++bp->fw_seq;
5690         u32 rc = 0;
5691         u32 cnt = 1;
5692         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5693
5694         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5695         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5696
5697         do {
5698                 /* let the FW do its magic ... */
5699                 msleep(delay);
5700
5701                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5702
5703                 /* Give the FW up to 2 seconds (200*10ms) */
5704         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5705
5706         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5707            cnt*delay, rc, seq);
5708
5709         /* is this a reply to our command? */
5710         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5711                 rc &= FW_MSG_CODE_MASK;
5712
5713         } else {
5714                 /* FW BUG! */
5715                 BNX2X_ERR("FW failed to respond!\n");
5716                 bnx2x_fw_dump(bp);
5717                 rc = 0;
5718         }
5719
5720         return rc;
5721 }
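/* Illustrative walk-through of the mailbox handshake above: if
 * bp->fw_seq was 0x0010, the function writes (command | 0x0011) to
 * drv_mb_header, then polls fw_mb_header until its
 * FW_MSG_SEQ_NUMBER_MASK bits echo 0x0011; the FW_MSG_CODE_MASK bits
 * of that word are returned as the reply, and 0 means the firmware
 * never answered.
 */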
5722
5723 static void bnx2x_free_mem(struct bnx2x *bp)
5724 {
5725
5726 #define BNX2X_PCI_FREE(x, y, size) \
5727         do { \
5728                 if (x) { \
5729                         pci_free_consistent(bp->pdev, size, x, y); \
5730                         x = NULL; \
5731                         y = 0; \
5732                 } \
5733         } while (0)
5734
5735 #define BNX2X_FREE(x) \
5736         do { \
5737                 if (x) { \
5738                         vfree(x); \
5739                         x = NULL; \
5740                 } \
5741         } while (0)
5742
5743         int i;
5744
5745         /* fastpath */
5746         for_each_queue(bp, i) {
5747
5748                 /* Status blocks */
5749                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5750                                bnx2x_fp(bp, i, status_blk_mapping),
5751                                sizeof(struct host_status_block) +
5752                                sizeof(struct eth_tx_db_data));
5753
5754                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5755                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5756                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5757                                bnx2x_fp(bp, i, tx_desc_mapping),
5758                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5759
5760                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5761                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5762                                bnx2x_fp(bp, i, rx_desc_mapping),
5763                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5764
5765                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5766                                bnx2x_fp(bp, i, rx_comp_mapping),
5767                                sizeof(struct eth_fast_path_rx_cqe) *
5768                                NUM_RCQ_BD);
5769
5770                 /* SGE ring */
5771                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5772                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5773                                bnx2x_fp(bp, i, rx_sge_mapping),
5774                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5775         }
5776         /* end of fastpath */
5777
5778         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5779                        sizeof(struct host_def_status_block));
5780
5781         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5782                        sizeof(struct bnx2x_slowpath));
5783
5784 #ifdef BCM_ISCSI
5785         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5786         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5787         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5788         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5789 #endif
5790         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5791
5792 #undef BNX2X_PCI_FREE
5793 #undef BNX2X_FREE
5794 }
5795
5796 static int bnx2x_alloc_mem(struct bnx2x *bp)
5797 {
5798
5799 #define BNX2X_PCI_ALLOC(x, y, size) \
5800         do { \
5801                 x = pci_alloc_consistent(bp->pdev, size, y); \
5802                 if (x == NULL) \
5803                         goto alloc_mem_err; \
5804                 memset(x, 0, size); \
5805         } while (0)
5806
5807 #define BNX2X_ALLOC(x, size) \
5808         do { \
5809                 x = vmalloc(size); \
5810                 if (x == NULL) \
5811                         goto alloc_mem_err; \
5812                 memset(x, 0, size); \
5813         } while (0)
5814
5815         int i;
5816
5817         /* fastpath */
5818         for_each_queue(bp, i) {
5819                 bnx2x_fp(bp, i, bp) = bp;
5820
5821                 /* Status blocks */
5822                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5823                                 &bnx2x_fp(bp, i, status_blk_mapping),
5824                                 sizeof(struct host_status_block) +
5825                                 sizeof(struct eth_tx_db_data));
5826
5827                 bnx2x_fp(bp, i, hw_tx_prods) =
5828                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5829
5830                 bnx2x_fp(bp, i, tx_prods_mapping) =
5831                                 bnx2x_fp(bp, i, status_blk_mapping) +
5832                                 sizeof(struct host_status_block);
5833
5834                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5835                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5836                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5837                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5838                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5839                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5840
5841                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5842                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5843                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5844                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5845                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5846
5847                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5848                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5849                                 sizeof(struct eth_fast_path_rx_cqe) *
5850                                 NUM_RCQ_BD);
5851
5852                 /* SGE ring */
5853                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5854                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5855                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5856                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5857                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5858         }
5859         /* end of fastpath */
5860
5861         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5862                         sizeof(struct host_def_status_block));
5863
5864         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5865                         sizeof(struct bnx2x_slowpath));
5866
5867 #ifdef BCM_ISCSI
5868         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5869
5870         /* Initialize T1 */
5871         for (i = 0; i < 64*1024; i += 64) {
5872                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5873                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5874         }
5875
5876         /* allocate searcher T2 table
5877            we allocate 1/4 of alloc num for T2
5878           (which is not entered into the ILT) */
5879         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5880
5881         /* Initialize T2 */
5882         for (i = 0; i < 16*1024; i += 64)
5883                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5884
5885         /* now fixup the last line in the block to point to the next block */
5886         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5887
5888         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5889         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5890
5891         /* QM queues (128*MAX_CONN) */
5892         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5893 #endif
5894
5895         /* Slow path ring */
5896         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5897
5898         return 0;
5899
5900 alloc_mem_err:
5901         bnx2x_free_mem(bp);
5902         return -ENOMEM;
5903
5904 #undef BNX2X_PCI_ALLOC
5905 #undef BNX2X_ALLOC
5906 }
5907
5908 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5909 {
5910         int i;
5911
5912         for_each_queue(bp, i) {
5913                 struct bnx2x_fastpath *fp = &bp->fp[i];
5914
5915                 u16 bd_cons = fp->tx_bd_cons;
5916                 u16 sw_prod = fp->tx_pkt_prod;
5917                 u16 sw_cons = fp->tx_pkt_cons;
5918
5919                 while (sw_cons != sw_prod) {
5920                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5921                         sw_cons++;
5922                 }
5923         }
5924 }
5925
5926 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5927 {
5928         int i, j;
5929
5930         for_each_queue(bp, j) {
5931                 struct bnx2x_fastpath *fp = &bp->fp[j];
5932
5933                 for (i = 0; i < NUM_RX_BD; i++) {
5934                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5935                         struct sk_buff *skb = rx_buf->skb;
5936
5937                         if (skb == NULL)
5938                                 continue;
5939
5940                         pci_unmap_single(bp->pdev,
5941                                          pci_unmap_addr(rx_buf, mapping),
5942                                          bp->rx_buf_size,
5943                                          PCI_DMA_FROMDEVICE);
5944
5945                         rx_buf->skb = NULL;
5946                         dev_kfree_skb(skb);
5947                 }
5948                 if (!fp->disable_tpa)
5949                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5950                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5951                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5952         }
5953 }
5954
5955 static void bnx2x_free_skbs(struct bnx2x *bp)
5956 {
5957         bnx2x_free_tx_skbs(bp);
5958         bnx2x_free_rx_skbs(bp);
5959 }
5960
5961 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5962 {
5963         int i, offset = 1;
5964
5965         free_irq(bp->msix_table[0].vector, bp->dev);
5966         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5967            bp->msix_table[0].vector);
5968
5969         for_each_queue(bp, i) {
5970                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5971                    "state %x\n", i, bp->msix_table[i + offset].vector,
5972                    bnx2x_fp(bp, i, state));
5973
5974                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5975                         BNX2X_ERR("IRQ of fp #%d being freed while "
5976                                   "state != closed\n", i);
5977
5978                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5979         }
5980 }
5981
5982 static void bnx2x_free_irq(struct bnx2x *bp)
5983 {
5984         if (bp->flags & USING_MSIX_FLAG) {
5985                 bnx2x_free_msix_irqs(bp);
5986                 pci_disable_msix(bp->pdev);
5987                 bp->flags &= ~USING_MSIX_FLAG;
5988
5989         } else
5990                 free_irq(bp->pdev->irq, bp->dev);
5991 }
5992
5993 static int bnx2x_enable_msix(struct bnx2x *bp)
5994 {
5995         int i, rc, offset;
5996
5997         bp->msix_table[0].entry = 0;
5998         offset = 1;
5999         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6000
6001         for_each_queue(bp, i) {
6002                 int igu_vec = offset + i + BP_L_ID(bp);
6003
6004                 bp->msix_table[i + offset].entry = igu_vec;
6005                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6006                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6007         }
6008
6009         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6010                              bp->num_queues + offset);
6011         if (rc) {
6012                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6013                 return -1;
6014         }
6015         bp->flags |= USING_MSIX_FLAG;
6016
6017         return 0;
6018 }
6019
6020 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6021 {
6022         int i, rc, offset = 1;
6023
6024         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6025                          bp->dev->name, bp->dev);
6026         if (rc) {
6027                 BNX2X_ERR("request sp irq failed\n");
6028                 return -EBUSY;
6029         }
6030
6031         for_each_queue(bp, i) {
6032                 rc = request_irq(bp->msix_table[i + offset].vector,
6033                                  bnx2x_msix_fp_int, 0,
6034                                  bp->dev->name, &bp->fp[i]);
6035                 if (rc) {
6036                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6037                                   i + offset, -rc);
6038                         bnx2x_free_msix_irqs(bp);
6039                         return -EBUSY;
6040                 }
6041
6042                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6043         }
6044
6045         return 0;
6046 }
6047
6048 static int bnx2x_req_irq(struct bnx2x *bp)
6049 {
6050         int rc;
6051
6052         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6053                          bp->dev->name, bp->dev);
6054         if (!rc)
6055                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6056
6057         return rc;
6058 }
6059
6060 static void bnx2x_napi_enable(struct bnx2x *bp)
6061 {
6062         int i;
6063
6064         for_each_queue(bp, i)
6065                 napi_enable(&bnx2x_fp(bp, i, napi));
6066 }
6067
6068 static void bnx2x_napi_disable(struct bnx2x *bp)
6069 {
6070         int i;
6071
6072         for_each_queue(bp, i)
6073                 napi_disable(&bnx2x_fp(bp, i, napi));
6074 }
6075
6076 static void bnx2x_netif_start(struct bnx2x *bp)
6077 {
6078         if (atomic_dec_and_test(&bp->intr_sem)) {
6079                 if (netif_running(bp->dev)) {
6080                         if (bp->state == BNX2X_STATE_OPEN)
6081                                 netif_wake_queue(bp->dev);
6082                         bnx2x_napi_enable(bp);
6083                         bnx2x_int_enable(bp);
6084                 }
6085         }
6086 }
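/* A note on the pattern above (our reading of it): bp->intr_sem acts
 * as a disable-nesting count, so bnx2x_netif_start() only re-enables
 * NAPI and interrupts when atomic_dec_and_test() brings the count back
 * to zero, balancing the increment taken when they were disabled.
 */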
6087
6088 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6089 {
6090         bnx2x_int_disable_sync(bp, disable_hw);
6091         if (netif_running(bp->dev)) {
6092                 bnx2x_napi_disable(bp);
6093                 netif_tx_disable(bp->dev);
6094                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6095         }
6096 }
6097
6098 /*
6099  * Init service functions
6100  */
6101
6102 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6103 {
6104         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6105         int port = BP_PORT(bp);
6106
6107         /* CAM allocation
6108          * unicasts 0-31:port0 32-63:port1
6109          * multicast 64-127:port0 128-191:port1
6110          */
6111         config->hdr.length_6b = 2;
6112         config->hdr.offset = port ? 31 : 0;
6113         config->hdr.client_id = BP_CL_ID(bp);
6114         config->hdr.reserved1 = 0;
6115
6116         /* primary MAC */
6117         config->config_table[0].cam_entry.msb_mac_addr =
6118                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6119         config->config_table[0].cam_entry.middle_mac_addr =
6120                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6121         config->config_table[0].cam_entry.lsb_mac_addr =
6122                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
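        /* Illustrative example (assuming a little-endian host): for
         * MAC 00:11:22:33:44:55, *(u16 *)&dev_addr[0] reads 0x1100 and
         * swab16() gives msb 0x0011; likewise middle 0x2233 and
         * lsb 0x4455, i.e. the CAM gets big-endian 16-bit chunks.
         */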
6123         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6124         if (set)
6125                 config->config_table[0].target_table_entry.flags = 0;
6126         else
6127                 CAM_INVALIDATE(config->config_table[0]);
6128         config->config_table[0].target_table_entry.client_id = 0;
6129         config->config_table[0].target_table_entry.vlan_id = 0;
6130
6131         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6132            (set ? "setting" : "clearing"),
6133            config->config_table[0].cam_entry.msb_mac_addr,
6134            config->config_table[0].cam_entry.middle_mac_addr,
6135            config->config_table[0].cam_entry.lsb_mac_addr);
6136
6137         /* broadcast */
6138         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6139         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6140         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6141         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6142         if (set)
6143                 config->config_table[1].target_table_entry.flags =
6144                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6145         else
6146                 CAM_INVALIDATE(config->config_table[1]);
6147         config->config_table[1].target_table_entry.client_id = 0;
6148         config->config_table[1].target_table_entry.vlan_id = 0;
6149
6150         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6151                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6152                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6153 }
6154
6155 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6156 {
6157         struct mac_configuration_cmd_e1h *config =
6158                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6159
6160         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6161                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6162                 return;
6163         }
6164
6165         /* CAM allocation for E1H
6166          * unicasts: by func number
6167          * multicast: 20+FUNC*20, 20 each
6168          */
6169         config->hdr.length_6b = 1;
6170         config->hdr.offset = BP_FUNC(bp);
6171         config->hdr.client_id = BP_CL_ID(bp);
6172         config->hdr.reserved1 = 0;
6173
6174         /* primary MAC */
6175         config->config_table[0].msb_mac_addr =
6176                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6177         config->config_table[0].middle_mac_addr =
6178                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6179         config->config_table[0].lsb_mac_addr =
6180                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6181         config->config_table[0].client_id = BP_L_ID(bp);
6182         config->config_table[0].vlan_id = 0;
6183         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6184         if (set)
6185                 config->config_table[0].flags = BP_PORT(bp);
6186         else
6187                 config->config_table[0].flags =
6188                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6189
6190         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6191            (set ? "setting" : "clearing"),
6192            config->config_table[0].msb_mac_addr,
6193            config->config_table[0].middle_mac_addr,
6194            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6195
6196         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6197                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6198                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6199 }
6200
6201 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6202                              int *state_p, int poll)
6203 {
6204         /* can take a while if any port is running */
6205         int cnt = 500;
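        /* 500 iterations with msleep(1) in between give the ramrod at
         * least half a second (usually more, since msleep(1) can sleep
         * longer) before the timeout below fires.
         */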
6206
6207         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6208            poll ? "polling" : "waiting", state, idx);
6209
6210         might_sleep();
6211         while (cnt--) {
6212                 if (poll) {
6213                         bnx2x_rx_int(bp->fp, 10);
6214                         /* if index is different from 0
6215                          * the reply for some commands will
6216                          * be on the non default queue
6217                          */
6218                         if (idx)
6219                                 bnx2x_rx_int(&bp->fp[idx], 10);
6220                 }
6221
6222                 mb(); /* state is changed by bnx2x_sp_event() */
6223                 if (*state_p == state)
6224                         return 0;
6225
6226                 msleep(1);
6227         }
6228
6229         /* timeout! */
6230         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6231                   poll ? "polling" : "waiting", state, idx);
6232 #ifdef BNX2X_STOP_ON_ERROR
6233         bnx2x_panic();
6234 #endif
6235
6236         return -EBUSY;
6237 }
6238
6239 static int bnx2x_setup_leading(struct bnx2x *bp)
6240 {
6241         int rc;
6242
6243         /* reset IGU state */
6244         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6245
6246         /* SETUP ramrod */
6247         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6248
6249         /* Wait for completion */
6250         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6251
6252         return rc;
6253 }
6254
6255 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6256 {
6257         /* reset IGU state */
6258         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6259
6260         /* SETUP ramrod */
6261         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6262         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6263
6264         /* Wait for completion */
6265         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6266                                  &(bp->fp[index].state), 0);
6267 }
6268
6269 static int bnx2x_poll(struct napi_struct *napi, int budget);
6270 static void bnx2x_set_rx_mode(struct net_device *dev);
6271
6272 /* must be called with rtnl_lock */
6273 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6274 {
6275         u32 load_code;
6276         int i, rc;
6277 #ifdef BNX2X_STOP_ON_ERROR
6278         if (unlikely(bp->panic))
6279                 return -EPERM;
6280 #endif
6281
6282         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6283
6284         /* Send LOAD_REQUEST command to MCP.
6285            The reply indicates the type of LOAD command:
6286            if this is the first function to be initialized,
6287            the common blocks must be initialized too; otherwise not
6288         */
6289         if (!BP_NOMCP(bp)) {
6290                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6291                 if (!load_code) {
6292                         BNX2X_ERR("MCP response failure, aborting\n");
6293                         return -EBUSY;
6294                 }
6295                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6296                         return -EBUSY; /* other port in diagnostic mode */
6297
6298         } else {
6299                 int port = BP_PORT(bp);
6300
6301                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6302                    load_count[0], load_count[1], load_count[2]);
6303                 load_count[0]++;
6304                 load_count[1 + port]++;
6305                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6306                    load_count[0], load_count[1], load_count[2]);
6307                 if (load_count[0] == 1)
6308                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6309                 else if (load_count[1 + port] == 1)
6310                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6311                 else
6312                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6313         }
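        /* Example of the no-MCP bookkeeping above: the first load on
         * port 0 leaves load_count at {1, 1, 0} and selects COMMON;
         * a following load on port 1 gives {2, 1, 1} and selects PORT;
         * any later function on either port then gets FUNCTION.
         */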
6314
6315         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6316             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6317                 bp->port.pmf = 1;
6318         else
6319                 bp->port.pmf = 0;
6320         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6321
6322         /* if we can't use MSI-X we only need one fp,
6323          * so try to enable MSI-X with the requested number of fp's
6324          * and fall back to INT#A with one fp
6325          */
6326         if (use_inta) {
6327                 bp->num_queues = 1;
6328
6329         } else {
6330                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6331                         /* user requested number */
6332                         bp->num_queues = use_multi;
6333
6334                 else if (use_multi)
6335                         bp->num_queues = min_t(u32, num_online_cpus(),
6336                                                BP_MAX_QUEUES(bp));
6337                 else
6338                         bp->num_queues = 1;
6339
6340                 if (bnx2x_enable_msix(bp)) {
6341                         /* failed to enable MSI-X */
6342                         bp->num_queues = 1;
6343                         if (use_multi)
6344                                 BNX2X_ERR("Multi requested but failed"
6345                                           " to enable MSI-X\n");
6346                 }
6347         }
6348         DP(NETIF_MSG_IFUP,
6349            "set number of queues to %d\n", bp->num_queues);
6350
6351         if (bnx2x_alloc_mem(bp))
6352                 return -ENOMEM;
6353
6354         for_each_queue(bp, i)
6355                 bnx2x_fp(bp, i, disable_tpa) =
6356                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6357
6358         if (bp->flags & USING_MSIX_FLAG) {
6359                 rc = bnx2x_req_msix_irqs(bp);
6360                 if (rc) {
6361                         pci_disable_msix(bp->pdev);
6362                         goto load_error;
6363                 }
6364         } else {
6365                 bnx2x_ack_int(bp);
6366                 rc = bnx2x_req_irq(bp);
6367                 if (rc) {
6368                         BNX2X_ERR("IRQ request failed, aborting\n");
6369                         goto load_error;
6370                 }
6371         }
6372
6373         for_each_queue(bp, i)
6374                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6375                                bnx2x_poll, 128);
6376
6377         /* Initialize HW */
6378         rc = bnx2x_init_hw(bp, load_code);
6379         if (rc) {
6380                 BNX2X_ERR("HW init failed, aborting\n");
6381                 goto load_int_disable;
6382         }
6383
6384         /* Setup NIC internals and enable interrupts */
6385         bnx2x_nic_init(bp, load_code);
6386
6387         /* Send LOAD_DONE command to MCP */
6388         if (!BP_NOMCP(bp)) {
6389                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6390                 if (!load_code) {
6391                         BNX2X_ERR("MCP response failure, aborting\n");
6392                         rc = -EBUSY;
6393                         goto load_rings_free;
6394                 }
6395         }
6396
6397         bnx2x_stats_init(bp);
6398
6399         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6400
6401         /* Enable Rx interrupt handling before sending the ramrod
6402            as it's completed on Rx FP queue */
6403         bnx2x_napi_enable(bp);
6404
6405         /* Enable interrupt handling */
6406         atomic_set(&bp->intr_sem, 0);
6407
6408         rc = bnx2x_setup_leading(bp);
6409         if (rc) {
6410                 BNX2X_ERR("Setup leading failed!\n");
6411                 goto load_netif_stop;
6412         }
6413
6414         if (CHIP_IS_E1H(bp))
6415                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6416                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6417                         bp->state = BNX2X_STATE_DISABLED;
6418                 }
6419
6420         if (bp->state == BNX2X_STATE_OPEN)
6421                 for_each_nondefault_queue(bp, i) {
6422                         rc = bnx2x_setup_multi(bp, i);
6423                         if (rc)
6424                                 goto load_netif_stop;
6425                 }
6426
6427         if (CHIP_IS_E1(bp))
6428                 bnx2x_set_mac_addr_e1(bp, 1);
6429         else
6430                 bnx2x_set_mac_addr_e1h(bp, 1);
6431
6432         if (bp->port.pmf)
6433                 bnx2x_initial_phy_init(bp);
6434
6435         /* Start fast path */
6436         switch (load_mode) {
6437         case LOAD_NORMAL:
6438                 /* Tx queue should only be re-enabled */
6439                 netif_wake_queue(bp->dev);
6440                 bnx2x_set_rx_mode(bp->dev);
6441                 break;
6442
6443         case LOAD_OPEN:
6444                 netif_start_queue(bp->dev);
6445                 bnx2x_set_rx_mode(bp->dev);
6446                 if (bp->flags & USING_MSIX_FLAG)
6447                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6448                                bp->dev->name);
6449                 break;
6450
6451         case LOAD_DIAG:
6452                 bnx2x_set_rx_mode(bp->dev);
6453                 bp->state = BNX2X_STATE_DIAG;
6454                 break;
6455
6456         default:
6457                 break;
6458         }
6459
6460         if (!bp->port.pmf)
6461                 bnx2x__link_status_update(bp);
6462
6463         /* start the timer */
6464         mod_timer(&bp->timer, jiffies + bp->current_interval);
6465
6466
6467         return 0;
6468
6469 load_netif_stop:
6470         bnx2x_napi_disable(bp);
6471 load_rings_free:
6472         /* Free SKBs, SGEs, TPA pool and driver internals */
6473         bnx2x_free_skbs(bp);
6474         for_each_queue(bp, i)
6475                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6476 load_int_disable:
6477         bnx2x_int_disable_sync(bp, 1);
6478         /* Release IRQs */
6479         bnx2x_free_irq(bp);
6480 load_error:
6481         bnx2x_free_mem(bp);
6482         bp->port.pmf = 0;
6483
6484         /* TBD we really need to reset the chip
6485            if we want to recover from this */
6486         return rc;
6487 }
6488
6489 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6490 {
6491         int rc;
6492
6493         /* halt the connection */
6494         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6495         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6496
6497         /* Wait for completion */
6498         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6499                                &(bp->fp[index].state), 1);
6500         if (rc) /* timeout */
6501                 return rc;
6502
6503         /* delete cfc entry */
6504         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6505
6506         /* Wait for completion */
6507         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6508                                &(bp->fp[index].state), 1);
6509         return rc;
6510 }
6511
6512 static int bnx2x_stop_leading(struct bnx2x *bp)
6513 {
6514         u16 dsb_sp_prod_idx;
6515         /* if the other port is handling traffic,
6516            this can take a lot of time */
6517         int cnt = 500;
6518         int rc;
6519
6520         might_sleep();
6521
6522         /* Send HALT ramrod */
6523         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6524         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6525
6526         /* Wait for completion */
6527         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6528                                &(bp->fp[0].state), 1);
6529         if (rc) /* timeout */
6530                 return rc;
6531
6532         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6533
6534         /* Send PORT_DELETE ramrod */
6535         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6536
6537         /* Wait for completion to arrive on default status block;
6538            we are going to reset the chip anyway,
6539            so there is not much to do if this times out
6540          */
6541         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6542                 if (!cnt) {
6543                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6544                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6545                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6546 #ifdef BNX2X_STOP_ON_ERROR
6547                         bnx2x_panic();
6548 #else
6549                         rc = -EBUSY;
6550 #endif
6551                         break;
6552                 }
6553                 cnt--;
6554                 msleep(1);
6555         }
6556         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6557         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6558
6559         return rc;
6560 }
6561
6562 static void bnx2x_reset_func(struct bnx2x *bp)
6563 {
6564         int port = BP_PORT(bp);
6565         int func = BP_FUNC(bp);
6566         int base, i;
6567
6568         /* Configure IGU */
6569         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6570         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6571
6572         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6573
6574         /* Clear ILT */
6575         base = FUNC_ILT_BASE(func);
6576         for (i = base; i < base + ILT_PER_FUNC; i++)
6577                 bnx2x_ilt_wr(bp, i, 0);
6578 }
6579
6580 static void bnx2x_reset_port(struct bnx2x *bp)
6581 {
6582         int port = BP_PORT(bp);
6583         u32 val;
6584
6585         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6586
6587         /* Do not rcv packets to BRB */
6588         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6589         /* Do not direct rcv packets that are not for MCP to the BRB */
6590         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6591                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6592
6593         /* Configure AEU */
6594         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6595
6596         msleep(100);
6597         /* Check for BRB port occupancy */
6598         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6599         if (val)
6600                 DP(NETIF_MSG_IFDOWN,
6601                    "BRB1 is not empty  %d blocks are occupied\n", val);
6602
6603         /* TODO: Close Doorbell port? */
6604 }
6605
6606 static void bnx2x_reset_common(struct bnx2x *bp)
6607 {
6608         /* reset_common */
6609         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6610                0xd3ffff7f);
6611         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6612 }
6613
6614 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6615 {
6616         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6617            BP_FUNC(bp), reset_code);
6618
6619         switch (reset_code) {
6620         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6621                 bnx2x_reset_port(bp);
6622                 bnx2x_reset_func(bp);
6623                 bnx2x_reset_common(bp);
6624                 break;
6625
6626         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6627                 bnx2x_reset_port(bp);
6628                 bnx2x_reset_func(bp);
6629                 break;
6630
6631         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6632                 bnx2x_reset_func(bp);
6633                 break;
6634
6635         default:
6636                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6637                 break;
6638         }
6639 }
6640
6641 /* must be called with rtnl_lock */
6642 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6643 {
6644         int port = BP_PORT(bp);
6645         u32 reset_code = 0;
6646         int i, cnt, rc;
6647
6648         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6649
6650         bp->rx_mode = BNX2X_RX_MODE_NONE;
6651         bnx2x_set_storm_rx_mode(bp);
6652
6653         bnx2x_netif_stop(bp, 1);
6654         if (!netif_running(bp->dev))
6655                 bnx2x_napi_disable(bp);
6656         del_timer_sync(&bp->timer);
6657         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6658                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6659         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6660
6661         /* Wait until tx fast path tasks complete */
6662         for_each_queue(bp, i) {
6663                 struct bnx2x_fastpath *fp = &bp->fp[i];
6664
6665                 cnt = 1000;
6666                 smp_rmb();
6667                 while (BNX2X_HAS_TX_WORK(fp)) {
6668
6669                         bnx2x_tx_int(fp, 1000);
6670                         if (!cnt) {
6671                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6672                                           i);
6673 #ifdef BNX2X_STOP_ON_ERROR
6674                                 bnx2x_panic();
6675                                 return -EBUSY;
6676 #else
6677                                 break;
6678 #endif
6679                         }
6680                         cnt--;
6681                         msleep(1);
6682                         smp_rmb();
6683                 }
6684         }
6685         /* Give HW time to discard old tx messages */
6686         msleep(1);
6687
6688         /* Release IRQs */
6689         bnx2x_free_irq(bp);
6690
6691         if (CHIP_IS_E1(bp)) {
6692                 struct mac_configuration_cmd *config =
6693                                                 bnx2x_sp(bp, mcast_config);
6694
6695                 bnx2x_set_mac_addr_e1(bp, 0);
6696
6697                 for (i = 0; i < config->hdr.length_6b; i++)
6698                         CAM_INVALIDATE(config->config_table[i]);
6699
6700                 config->hdr.length_6b = i;
6701                 if (CHIP_REV_IS_SLOW(bp))
6702                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6703                 else
6704                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6705                 config->hdr.client_id = BP_CL_ID(bp);
6706                 config->hdr.reserved1 = 0;
6707
6708                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6709                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6710                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6711
6712         } else { /* E1H */
6713                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6714
6715                 bnx2x_set_mac_addr_e1h(bp, 0);
6716
6717                 for (i = 0; i < MC_HASH_SIZE; i++)
6718                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6719         }
6720
6721         if (unload_mode == UNLOAD_NORMAL)
6722                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6723
6724         else if (bp->flags & NO_WOL_FLAG) {
6725                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6726                 if (CHIP_IS_E1H(bp))
6727                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6728
6729         } else if (bp->wol) {
6730                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6731                 u8 *mac_addr = bp->dev->dev_addr;
6732                 u32 val;
6733                 /* The mac address is written to entries 1-4 to
6734                    preserve entry 0 which is used by the PMF */
6735                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6736
6737                 val = (mac_addr[0] << 8) | mac_addr[1];
6738                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6739
6740                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6741                       (mac_addr[4] << 8) | mac_addr[5];
6742                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6743
6744                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6745
6746         } else
6747                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6748
6749         /* Close multi and leading connections
6750            Completions for ramrods are collected in a synchronous way */
6751         for_each_nondefault_queue(bp, i)
6752                 if (bnx2x_stop_multi(bp, i))
6753                         goto unload_error;
6754
6755         rc = bnx2x_stop_leading(bp);
6756         if (rc) {
6757                 BNX2X_ERR("Stop leading failed!\n");
6758 #ifdef BNX2X_STOP_ON_ERROR
6759                 return -EBUSY;
6760 #else
6761                 goto unload_error;
6762 #endif
6763         }
6764
6765 unload_error:
6766         if (!BP_NOMCP(bp))
6767                 reset_code = bnx2x_fw_command(bp, reset_code);
6768         else {
6769                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6770                    load_count[0], load_count[1], load_count[2]);
6771                 load_count[0]--;
6772                 load_count[1 + port]--;
6773                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6774                    load_count[0], load_count[1], load_count[2]);
6775                 if (load_count[0] == 0)
6776                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6777                 else if (load_count[1 + port] == 0)
6778                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6779                 else
6780                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6781         }
6782
6783         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6784             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6785                 bnx2x__link_reset(bp);
6786
6787         /* Reset the chip */
6788         bnx2x_reset_chip(bp, reset_code);
6789
6790         /* Report UNLOAD_DONE to MCP */
6791         if (!BP_NOMCP(bp))
6792                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6793         bp->port.pmf = 0;
6794
6795         /* Free SKBs, SGEs, TPA pool and driver internals */
6796         bnx2x_free_skbs(bp);
6797         for_each_queue(bp, i)
6798                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6799         bnx2x_free_mem(bp);
6800
6801         bp->state = BNX2X_STATE_CLOSED;
6802
6803         netif_carrier_off(bp->dev);
6804
6805         return 0;
6806 }
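
/* Editor's note: a hypothetical walk-through of the NO-MCP accounting in
 * bnx2x_nic_unload() above.  With two functions loaded on port 0,
 * load_count starts at {2, 2, 0}.  The first unload drops it to
 * {1, 1, 0}; neither the common nor the port count is zero, so
 * FW_MSG_CODE_DRV_UNLOAD_FUNCTION is selected.  The second unload drops
 * it to {0, 0, 0}, selecting FW_MSG_CODE_DRV_UNLOAD_COMMON, which also
 * takes the bnx2x__link_reset() path.
 */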
6807
6808 static void bnx2x_reset_task(struct work_struct *work)
6809 {
6810         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6811
6812 #ifdef BNX2X_STOP_ON_ERROR
6813         BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
6814                   " so the reset is skipped to allow a debug dump;\n"
6815          KERN_ERR " you will need to reboot when done\n");
6816         return;
6817 #endif
6818
6819         rtnl_lock();
6820
6821         if (!netif_running(bp->dev))
6822                 goto reset_task_exit;
6823
6824         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6825         bnx2x_nic_load(bp, LOAD_NORMAL);
6826
6827 reset_task_exit:
6828         rtnl_unlock();
6829 }
6830
6831 /* end of nic load/unload */
6832
6833 /* ethtool_ops */
6834
6835 /*
6836  * Init service functions
6837  */
6838
6839 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6840 {
6841         u32 val;
6842
6843         /* Check if there is any driver already loaded */
6844         val = REG_RD(bp, MISC_REG_UNPREPARED);
6845         if (val == 0x1) {
6846                 /* Check if it is the UNDI driver; UNDI initializes
6847                  * the CID offset for the normal doorbell to 0x7
6848                  */
6849                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6850                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6851                 if (val == 0x7)
6852                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6853                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6854
6855                 if (val == 0x7) {
6856                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6857                         /* save our func */
6858                         int func = BP_FUNC(bp);
6859                         u32 swap_en;
6860                         u32 swap_val;
6861
6862                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6863
6864                         /* try unload UNDI on port 0 */
6865                         bp->func = 0;
6866                         bp->fw_seq =
6867                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6868                                 DRV_MSG_SEQ_NUMBER_MASK);
6869                         reset_code = bnx2x_fw_command(bp, reset_code);
6870
6871                         /* if UNDI is loaded on the other port */
6872                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6873
6874                                 /* send "DONE" for previous unload */
6875                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6876
6877                                 /* unload UNDI on port 1 */
6878                                 bp->func = 1;
6879                                 bp->fw_seq =
6880                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6881                                         DRV_MSG_SEQ_NUMBER_MASK);
6882                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6883
6884                                 bnx2x_fw_command(bp, reset_code);
6885                         }
6886
6887                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6888                                     HC_REG_CONFIG_0), 0x1000);
6889
6890                         /* close input traffic and wait for it to stop */
6891                         /* Do not rcv packets to BRB */
6892                         REG_WR(bp,
6893                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6894                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6895                         /* Do not direct received packets that are not
6896                          * destined for the MCP to the BRB */
6897                         REG_WR(bp,
6898                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6899                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6900                         /* clear AEU */
6901                         REG_WR(bp,
6902                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6903                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6904                         msleep(10);
6905
6906                         /* save NIG port swap info */
6907                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6908                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6909                         /* reset device */
6910                         REG_WR(bp,
6911                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6912                                0xd3ffffff);
6913                         REG_WR(bp,
6914                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6915                                0x1403);
6916                         /* take the NIG out of reset and restore swap values */
6917                         REG_WR(bp,
6918                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6919                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6920                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6921                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6922
6923                         /* send unload done to the MCP */
6924                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6925
6926                         /* restore our func and fw_seq */
6927                         bp->func = func;
6928                         bp->fw_seq =
6929                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6930                                 DRV_MSG_SEQ_NUMBER_MASK);
6931                 }
6932         }
6933 }
6934
6935 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6936 {
6937         u32 val, val2, val3, val4, id;
6938         u16 pmc;
6939
6940         /* Get the chip revision id and number. */
6941         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6942         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6943         id = ((val & 0xffff) << 16);
6944         val = REG_RD(bp, MISC_REG_CHIP_REV);
6945         id |= ((val & 0xf) << 12);
6946         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6947         id |= ((val & 0xff) << 4);
6948         val = REG_RD(bp, MISC_REG_BOND_ID);
6949         id |= (val & 0xf);
6950         bp->common.chip_id = id;
6951         bp->link_params.chip_id = bp->common.chip_id;
6952         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6953
6954         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6955         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6956                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6957         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6958                        bp->common.flash_size, bp->common.flash_size);
6959
6960         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6961         bp->link_params.shmem_base = bp->common.shmem_base;
6962         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6963
6964         if (!bp->common.shmem_base ||
6965             (bp->common.shmem_base < 0xA0000) ||
6966             (bp->common.shmem_base >= 0xC0000)) {
6967                 BNX2X_DEV_INFO("MCP not active\n");
6968                 bp->flags |= NO_MCP_FLAG;
6969                 return;
6970         }
6971
6972         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6973         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6974                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6975                 BNX2X_ERR("BAD MCP validity signature\n");
6976
6977         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6978         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6979
6980         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6981                        bp->common.hw_config, bp->common.board);
6982
6983         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6984                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6985                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6986
6987         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6988         bp->common.bc_ver = val;
6989         BNX2X_DEV_INFO("bc_ver %X\n", val);
6990         if (val < BNX2X_BC_VER) {
6991                 /* for now only warn;
6992                  * later we might need to enforce this */
6993                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6994                           " please upgrade BC\n", BNX2X_BC_VER, val);
6995         }
6996
6997         if (BP_E1HVN(bp) == 0) {
6998                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6999                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7000         } else {
7001                 /* no WOL capability for E1HVN != 0 */
7002                 bp->flags |= NO_WOL_FLAG;
7003         }
7004         BNX2X_DEV_INFO("%sWoL capable\n",
7005                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7006
7007         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7008         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7009         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7010         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7011
7012         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7013                val, val2, val3, val4);
7014 }
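
#if 0	/* Editor's sketch (illustrative only, not driver code): decoding
	 * the chip_id assembled in bnx2x_get_common_hwinfo() above, per
	 * the "chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3"
	 * layout */
static void example_decode_chip_id(u32 id)
{
	u16 num  = (id >> 16) & 0xffff;		/* chip number */
	u8 rev   = (id >> 12) & 0xf;		/* revision */
	u8 metal = (id >> 4) & 0xff;		/* metal layer */
	u8 bond  = id & 0xf;			/* bond id */

	printk(KERN_DEBUG "num 0x%x rev 0x%x metal 0x%x bond 0x%x\n",
	       num, rev, metal, bond);
}
#endif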
7015
7016 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7017                                                     u32 switch_cfg)
7018 {
7019         int port = BP_PORT(bp);
7020         u32 ext_phy_type;
7021
7022         switch (switch_cfg) {
7023         case SWITCH_CFG_1G:
7024                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7025
7026                 ext_phy_type =
7027                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7028                 switch (ext_phy_type) {
7029                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7030                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7031                                        ext_phy_type);
7032
7033                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7034                                                SUPPORTED_10baseT_Full |
7035                                                SUPPORTED_100baseT_Half |
7036                                                SUPPORTED_100baseT_Full |
7037                                                SUPPORTED_1000baseT_Full |
7038                                                SUPPORTED_2500baseX_Full |
7039                                                SUPPORTED_TP |
7040                                                SUPPORTED_FIBRE |
7041                                                SUPPORTED_Autoneg |
7042                                                SUPPORTED_Pause |
7043                                                SUPPORTED_Asym_Pause);
7044                         break;
7045
7046                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7047                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7048                                        ext_phy_type);
7049
7050                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7051                                                SUPPORTED_10baseT_Full |
7052                                                SUPPORTED_100baseT_Half |
7053                                                SUPPORTED_100baseT_Full |
7054                                                SUPPORTED_1000baseT_Full |
7055                                                SUPPORTED_TP |
7056                                                SUPPORTED_FIBRE |
7057                                                SUPPORTED_Autoneg |
7058                                                SUPPORTED_Pause |
7059                                                SUPPORTED_Asym_Pause);
7060                         break;
7061
7062                 default:
7063                         BNX2X_ERR("NVRAM config error. "
7064                                   "BAD SerDes ext_phy_config 0x%x\n",
7065                                   bp->link_params.ext_phy_config);
7066                         return;
7067                 }
7068
7069                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7070                                            port*0x10);
7071                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7072                 break;
7073
7074         case SWITCH_CFG_10G:
7075                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7076
7077                 ext_phy_type =
7078                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7079                 switch (ext_phy_type) {
7080                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7081                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7082                                        ext_phy_type);
7083
7084                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7085                                                SUPPORTED_10baseT_Full |
7086                                                SUPPORTED_100baseT_Half |
7087                                                SUPPORTED_100baseT_Full |
7088                                                SUPPORTED_1000baseT_Full |
7089                                                SUPPORTED_2500baseX_Full |
7090                                                SUPPORTED_10000baseT_Full |
7091                                                SUPPORTED_TP |
7092                                                SUPPORTED_FIBRE |
7093                                                SUPPORTED_Autoneg |
7094                                                SUPPORTED_Pause |
7095                                                SUPPORTED_Asym_Pause);
7096                         break;
7097
7098                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7099                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7100                                        ext_phy_type);
7101
7102                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7103                                                SUPPORTED_FIBRE |
7104                                                SUPPORTED_Pause |
7105                                                SUPPORTED_Asym_Pause);
7106                         break;
7107
7108                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7109                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7110                                        ext_phy_type);
7111
7112                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7113                                                SUPPORTED_1000baseT_Full |
7114                                                SUPPORTED_FIBRE |
7115                                                SUPPORTED_Pause |
7116                                                SUPPORTED_Asym_Pause);
7117                         break;
7118
7119                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7120                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7121                                        ext_phy_type);
7122
7123                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7124                                                SUPPORTED_1000baseT_Full |
7125                                                SUPPORTED_FIBRE |
7126                                                SUPPORTED_Autoneg |
7127                                                SUPPORTED_Pause |
7128                                                SUPPORTED_Asym_Pause);
7129                         break;
7130
7131                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7132                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7133                                        ext_phy_type);
7134
7135                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7136                                                SUPPORTED_2500baseX_Full |
7137                                                SUPPORTED_1000baseT_Full |
7138                                                SUPPORTED_FIBRE |
7139                                                SUPPORTED_Autoneg |
7140                                                SUPPORTED_Pause |
7141                                                SUPPORTED_Asym_Pause);
7142                         break;
7143
7144                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7145                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7146                                        ext_phy_type);
7147
7148                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7149                                                SUPPORTED_TP |
7150                                                SUPPORTED_Autoneg |
7151                                                SUPPORTED_Pause |
7152                                                SUPPORTED_Asym_Pause);
7153                         break;
7154
7155                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7156                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7157                                   bp->link_params.ext_phy_config);
7158                         break;
7159
7160                 default:
7161                         BNX2X_ERR("NVRAM config error. "
7162                                   "BAD XGXS ext_phy_config 0x%x\n",
7163                                   bp->link_params.ext_phy_config);
7164                         return;
7165                 }
7166
7167                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7168                                            port*0x18);
7169                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7170
7171                 break;
7172
7173         default:
7174                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7175                           bp->port.link_config);
7176                 return;
7177         }
7178         bp->link_params.phy_addr = bp->port.phy_addr;
7179
7180         /* mask what we support according to speed_cap_mask */
7181         if (!(bp->link_params.speed_cap_mask &
7182                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7183                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7184
7185         if (!(bp->link_params.speed_cap_mask &
7186                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7187                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7188
7189         if (!(bp->link_params.speed_cap_mask &
7190                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7191                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7192
7193         if (!(bp->link_params.speed_cap_mask &
7194                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7195                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7196
7197         if (!(bp->link_params.speed_cap_mask &
7198                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7199                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7200                                         SUPPORTED_1000baseT_Full);
7201
7202         if (!(bp->link_params.speed_cap_mask &
7203                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7204                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7205
7206         if (!(bp->link_params.speed_cap_mask &
7207                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7208                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7209
7210         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7211 }
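
/* Editor's note (hypothetical values): if NVRAM reported a
 * speed_cap_mask with only the D0_1G and D0_10G capability bits set,
 * the masking above would clear the 10/100/2500 SUPPORTED_* speed bits
 * and leave 1000baseT_Full, 10000baseT_Full and the media/autoneg/pause
 * bits untouched.
 */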
7212
7213 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7214 {
7215         bp->link_params.req_duplex = DUPLEX_FULL;
7216
7217         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7218         case PORT_FEATURE_LINK_SPEED_AUTO:
7219                 if (bp->port.supported & SUPPORTED_Autoneg) {
7220                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7221                         bp->port.advertising = bp->port.supported;
7222                 } else {
7223                         u32 ext_phy_type =
7224                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7225
7226                         if ((ext_phy_type ==
7227                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7228                             (ext_phy_type ==
7229                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7230                                 /* force 10G, no AN */
7231                                 bp->link_params.req_line_speed = SPEED_10000;
7232                                 bp->port.advertising =
7233                                                 (ADVERTISED_10000baseT_Full |
7234                                                  ADVERTISED_FIBRE);
7235                                 break;
7236                         }
7237                         BNX2X_ERR("NVRAM config error. "
7238                                   "Invalid link_config 0x%x"
7239                                   "  Autoneg not supported\n",
7240                                   bp->port.link_config);
7241                         return;
7242                 }
7243                 break;
7244
7245         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7246                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7247                         bp->link_params.req_line_speed = SPEED_10;
7248                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7249                                                 ADVERTISED_TP);
7250                 } else {
7251                         BNX2X_ERR("NVRAM config error. "
7252                                   "Invalid link_config 0x%x"
7253                                   "  speed_cap_mask 0x%x\n",
7254                                   bp->port.link_config,
7255                                   bp->link_params.speed_cap_mask);
7256                         return;
7257                 }
7258                 break;
7259
7260         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7261                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7262                         bp->link_params.req_line_speed = SPEED_10;
7263                         bp->link_params.req_duplex = DUPLEX_HALF;
7264                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7265                                                 ADVERTISED_TP);
7266                 } else {
7267                         BNX2X_ERR("NVRAM config error. "
7268                                   "Invalid link_config 0x%x"
7269                                   "  speed_cap_mask 0x%x\n",
7270                                   bp->port.link_config,
7271                                   bp->link_params.speed_cap_mask);
7272                         return;
7273                 }
7274                 break;
7275
7276         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7277                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7278                         bp->link_params.req_line_speed = SPEED_100;
7279                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7280                                                 ADVERTISED_TP);
7281                 } else {
7282                         BNX2X_ERR("NVRAM config error. "
7283                                   "Invalid link_config 0x%x"
7284                                   "  speed_cap_mask 0x%x\n",
7285                                   bp->port.link_config,
7286                                   bp->link_params.speed_cap_mask);
7287                         return;
7288                 }
7289                 break;
7290
7291         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7292                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7293                         bp->link_params.req_line_speed = SPEED_100;
7294                         bp->link_params.req_duplex = DUPLEX_HALF;
7295                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7296                                                 ADVERTISED_TP);
7297                 } else {
7298                         BNX2X_ERR("NVRAM config error. "
7299                                   "Invalid link_config 0x%x"
7300                                   "  speed_cap_mask 0x%x\n",
7301                                   bp->port.link_config,
7302                                   bp->link_params.speed_cap_mask);
7303                         return;
7304                 }
7305                 break;
7306
7307         case PORT_FEATURE_LINK_SPEED_1G:
7308                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7309                         bp->link_params.req_line_speed = SPEED_1000;
7310                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7311                                                 ADVERTISED_TP);
7312                 } else {
7313                         BNX2X_ERR("NVRAM config error. "
7314                                   "Invalid link_config 0x%x"
7315                                   "  speed_cap_mask 0x%x\n",
7316                                   bp->port.link_config,
7317                                   bp->link_params.speed_cap_mask);
7318                         return;
7319                 }
7320                 break;
7321
7322         case PORT_FEATURE_LINK_SPEED_2_5G:
7323                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7324                         bp->link_params.req_line_speed = SPEED_2500;
7325                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7326                                                 ADVERTISED_TP);
7327                 } else {
7328                         BNX2X_ERR("NVRAM config error. "
7329                                   "Invalid link_config 0x%x"
7330                                   "  speed_cap_mask 0x%x\n",
7331                                   bp->port.link_config,
7332                                   bp->link_params.speed_cap_mask);
7333                         return;
7334                 }
7335                 break;
7336
7337         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7338         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7339         case PORT_FEATURE_LINK_SPEED_10G_KR:
7340                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7341                         bp->link_params.req_line_speed = SPEED_10000;
7342                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7343                                                 ADVERTISED_FIBRE);
7344                 } else {
7345                         BNX2X_ERR("NVRAM config error. "
7346                                   "Invalid link_config 0x%x"
7347                                   "  speed_cap_mask 0x%x\n",
7348                                   bp->port.link_config,
7349                                   bp->link_params.speed_cap_mask);
7350                         return;
7351                 }
7352                 break;
7353
7354         default:
7355                 BNX2X_ERR("NVRAM config error. "
7356                           "BAD link speed link_config 0x%x\n",
7357                           bp->port.link_config);
7358                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7359                 bp->port.advertising = bp->port.supported;
7360                 break;
7361         }
7362
7363         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7364                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7365         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7366             !(bp->port.supported & SUPPORTED_Autoneg))
7367                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7368
7369         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7370                        "  advertising 0x%x\n",
7371                        bp->link_params.req_line_speed,
7372                        bp->link_params.req_duplex,
7373                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7374 }
7375
7376 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7377 {
7378         int port = BP_PORT(bp);
7379         u32 val, val2;
7380
7381         bp->link_params.bp = bp;
7382         bp->link_params.port = port;
7383
7384         bp->link_params.serdes_config =
7385                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7386         bp->link_params.lane_config =
7387                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7388         bp->link_params.ext_phy_config =
7389                 SHMEM_RD(bp,
7390                          dev_info.port_hw_config[port].external_phy_config);
7391         bp->link_params.speed_cap_mask =
7392                 SHMEM_RD(bp,
7393                          dev_info.port_hw_config[port].speed_capability_mask);
7394
7395         bp->port.link_config =
7396                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7397
7398         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7399              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7400                        "  link_config 0x%08x\n",
7401                        bp->link_params.serdes_config,
7402                        bp->link_params.lane_config,
7403                        bp->link_params.ext_phy_config,
7404                        bp->link_params.speed_cap_mask, bp->port.link_config);
7405
7406         bp->link_params.switch_cfg = (bp->port.link_config &
7407                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7408         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7409
7410         bnx2x_link_settings_requested(bp);
7411
7412         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7413         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7414         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7415         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7416         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7417         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7418         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7419         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7420         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7421         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7422 }
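
#if 0	/* Editor's sketch (illustrative only, not driver code): the
	 * mac_upper/mac_lower unpacking above, standalone.  With
	 * hypothetical shmem words upper = 0x00001122 and
	 * lower = 0x33445566, the station address comes out as
	 * 11:22:33:44:55:66. */
static void example_unpack_mac(u32 upper, u32 lower, u8 *mac)
{
	mac[0] = (u8)(upper >> 8);
	mac[1] = (u8)upper;
	mac[2] = (u8)(lower >> 24);
	mac[3] = (u8)(lower >> 16);
	mac[4] = (u8)(lower >> 8);
	mac[5] = (u8)lower;
}
#endif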
7423
7424 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7425 {
7426         int func = BP_FUNC(bp);
7427         u32 val, val2;
7428         int rc = 0;
7429
7430         bnx2x_get_common_hwinfo(bp);
7431
7432         bp->e1hov = 0;
7433         bp->e1hmf = 0;
7434         if (CHIP_IS_E1H(bp)) {
7435                 bp->mf_config =
7436                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7437
7438                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7439                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7440                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7441
7442                         bp->e1hov = val;
7443                         bp->e1hmf = 1;
7444                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7445                                        "(0x%04x)\n",
7446                                        func, bp->e1hov, bp->e1hov);
7447                 } else {
7448                         BNX2X_DEV_INFO("Single function mode\n");
7449                         if (BP_E1HVN(bp)) {
7450                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7451                                           "  aborting\n", func);
7452                                 rc = -EPERM;
7453                         }
7454                 }
7455         }
7456
7457         if (!BP_NOMCP(bp)) {
7458                 bnx2x_get_port_hwinfo(bp);
7459
7460                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7461                               DRV_MSG_SEQ_NUMBER_MASK);
7462                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7463         }
7464
7465         if (IS_E1HMF(bp)) {
7466                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7467                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7468                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7469                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7470                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7471                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7472                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7473                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7474                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7475                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7476                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7477                                ETH_ALEN);
7478                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7479                                ETH_ALEN);
7480                 }
7481
7482                 return rc;
7483         }
7484
7485         if (BP_NOMCP(bp)) {
7486                 /* only supposed to happen on emulation/FPGA */
7487                 BNX2X_ERR("warning: random MAC workaround active\n");
7488                 random_ether_addr(bp->dev->dev_addr);
7489                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7490         }
7491
7492         return rc;
7493 }
7494
7495 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7496 {
7497         int func = BP_FUNC(bp);
7498         int rc;
7499
7500         /* Disable interrupt handling until HW is initialized */
7501         atomic_set(&bp->intr_sem, 1);
7502
7503         mutex_init(&bp->port.phy_mutex);
7504
7505         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7506         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7507
7508         rc = bnx2x_get_hwinfo(bp);
7509
7510         /* need to reset chip if undi was active */
7511         if (!BP_NOMCP(bp))
7512                 bnx2x_undi_unload(bp);
7513
7514         if (CHIP_REV_IS_FPGA(bp))
7515                 printk(KERN_ERR PFX "FPGA detected\n");
7516
7517         if (BP_NOMCP(bp) && (func == 0))
7518                 printk(KERN_ERR PFX
7519                        "MCP disabled, must load devices in order!\n");
7520
7521         /* Set TPA flags */
7522         if (disable_tpa) {
7523                 bp->flags &= ~TPA_ENABLE_FLAG;
7524                 bp->dev->features &= ~NETIF_F_LRO;
7525         } else {
7526                 bp->flags |= TPA_ENABLE_FLAG;
7527                 bp->dev->features |= NETIF_F_LRO;
7528         }
7529
7530
7531         bp->tx_ring_size = MAX_TX_AVAIL;
7532         bp->rx_ring_size = MAX_RX_AVAIL;
7533
7534         bp->rx_csum = 1;
7535         bp->rx_offset = 0;
7536
7537         bp->tx_ticks = 50;
7538         bp->rx_ticks = 25;
7539
7540         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7541         bp->current_interval = (poll ? poll : bp->timer_interval);
7542
7543         init_timer(&bp->timer);
7544         bp->timer.expires = jiffies + bp->current_interval;
7545         bp->timer.data = (unsigned long) bp;
7546         bp->timer.function = bnx2x_timer;
7547
7548         return rc;
7549 }
7550
7551 /*
7552  * ethtool service functions
7553  */
7554
7555 /* All ethtool functions called with rtnl_lock */
7556
7557 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7558 {
7559         struct bnx2x *bp = netdev_priv(dev);
7560
7561         cmd->supported = bp->port.supported;
7562         cmd->advertising = bp->port.advertising;
7563
7564         if (netif_carrier_ok(dev)) {
7565                 cmd->speed = bp->link_vars.line_speed;
7566                 cmd->duplex = bp->link_vars.duplex;
7567         } else {
7568                 cmd->speed = bp->link_params.req_line_speed;
7569                 cmd->duplex = bp->link_params.req_duplex;
7570         }
7571         if (IS_E1HMF(bp)) {
7572                 u16 vn_max_rate;
7573
7574                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7575                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7576                 if (vn_max_rate < cmd->speed)
7577                         cmd->speed = vn_max_rate;
7578         }
7579
7580         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7581                 u32 ext_phy_type =
7582                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7583
7584                 switch (ext_phy_type) {
7585                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7586                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7587                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7588                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7589                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7590                         cmd->port = PORT_FIBRE;
7591                         break;
7592
7593                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7594                         cmd->port = PORT_TP;
7595                         break;
7596
7597                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7598                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7599                                   bp->link_params.ext_phy_config);
7600                         break;
7601
7602                 default:
7603                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7604                            bp->link_params.ext_phy_config);
7605                         break;
7606                 }
7607         } else
7608                 cmd->port = PORT_TP;
7609
7610         cmd->phy_address = bp->port.phy_addr;
7611         cmd->transceiver = XCVR_INTERNAL;
7612
7613         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7614                 cmd->autoneg = AUTONEG_ENABLE;
7615         else
7616                 cmd->autoneg = AUTONEG_DISABLE;
7617
7618         cmd->maxtxpkt = 0;
7619         cmd->maxrxpkt = 0;
7620
7621         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7622            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7623            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7624            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7625            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7626            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7627            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7628
7629         return 0;
7630 }
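
/* Editor's note (hypothetical value): the MAX_BW field above is in
 * units of 100 Mbps, so in E1HMF mode an mf_config MAX_BW of 25 gives
 * vn_max_rate = 2500, and a 10000 Mbps line_speed is reported to
 * ethtool as 2500.
 */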
7631
7632 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7633 {
7634         struct bnx2x *bp = netdev_priv(dev);
7635         u32 advertising;
7636
7637         if (IS_E1HMF(bp))
7638                 return 0;
7639
7640         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7641            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7642            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7643            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7644            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7645            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7646            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7647
7648         if (cmd->autoneg == AUTONEG_ENABLE) {
7649                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7650                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7651                         return -EINVAL;
7652                 }
7653
7654                 /* advertise the requested speed and duplex if supported */
7655                 cmd->advertising &= bp->port.supported;
7656
7657                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7658                 bp->link_params.req_duplex = DUPLEX_FULL;
7659                 bp->port.advertising |= (ADVERTISED_Autoneg |
7660                                          cmd->advertising);
7661
7662         } else { /* forced speed */
7663                 /* advertise the requested speed and duplex if supported */
7664                 switch (cmd->speed) {
7665                 case SPEED_10:
7666                         if (cmd->duplex == DUPLEX_FULL) {
7667                                 if (!(bp->port.supported &
7668                                       SUPPORTED_10baseT_Full)) {
7669                                         DP(NETIF_MSG_LINK,
7670                                            "10M full not supported\n");
7671                                         return -EINVAL;
7672                                 }
7673
7674                                 advertising = (ADVERTISED_10baseT_Full |
7675                                                ADVERTISED_TP);
7676                         } else {
7677                                 if (!(bp->port.supported &
7678                                       SUPPORTED_10baseT_Half)) {
7679                                         DP(NETIF_MSG_LINK,
7680                                            "10M half not supported\n");
7681                                         return -EINVAL;
7682                                 }
7683
7684                                 advertising = (ADVERTISED_10baseT_Half |
7685                                                ADVERTISED_TP);
7686                         }
7687                         break;
7688
7689                 case SPEED_100:
7690                         if (cmd->duplex == DUPLEX_FULL) {
7691                                 if (!(bp->port.supported &
7692                                                 SUPPORTED_100baseT_Full)) {
7693                                         DP(NETIF_MSG_LINK,
7694                                            "100M full not supported\n");
7695                                         return -EINVAL;
7696                                 }
7697
7698                                 advertising = (ADVERTISED_100baseT_Full |
7699                                                ADVERTISED_TP);
7700                         } else {
7701                                 if (!(bp->port.supported &
7702                                                 SUPPORTED_100baseT_Half)) {
7703                                         DP(NETIF_MSG_LINK,
7704                                            "100M half not supported\n");
7705                                         return -EINVAL;
7706                                 }
7707
7708                                 advertising = (ADVERTISED_100baseT_Half |
7709                                                ADVERTISED_TP);
7710                         }
7711                         break;
7712
7713                 case SPEED_1000:
7714                         if (cmd->duplex != DUPLEX_FULL) {
7715                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7716                                 return -EINVAL;
7717                         }
7718
7719                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7720                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7721                                 return -EINVAL;
7722                         }
7723
7724                         advertising = (ADVERTISED_1000baseT_Full |
7725                                        ADVERTISED_TP);
7726                         break;
7727
7728                 case SPEED_2500:
7729                         if (cmd->duplex != DUPLEX_FULL) {
7730                                 DP(NETIF_MSG_LINK,
7731                                    "2.5G half not supported\n");
7732                                 return -EINVAL;
7733                         }
7734
7735                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7736                                 DP(NETIF_MSG_LINK,
7737                                    "2.5G full not supported\n");
7738                                 return -EINVAL;
7739                         }
7740
7741                         advertising = (ADVERTISED_2500baseX_Full |
7742                                        ADVERTISED_TP);
7743                         break;
7744
7745                 case SPEED_10000:
7746                         if (cmd->duplex != DUPLEX_FULL) {
7747                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7748                                 return -EINVAL;
7749                         }
7750
7751                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7752                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7753                                 return -EINVAL;
7754                         }
7755
7756                         advertising = (ADVERTISED_10000baseT_Full |
7757                                        ADVERTISED_FIBRE);
7758                         break;
7759
7760                 default:
7761                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7762                         return -EINVAL;
7763                 }
7764
7765                 bp->link_params.req_line_speed = cmd->speed;
7766                 bp->link_params.req_duplex = cmd->duplex;
7767                 bp->port.advertising = advertising;
7768         }
7769
7770         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7771            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7772            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7773            bp->port.advertising);
7774
7775         if (netif_running(dev)) {
7776                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7777                 bnx2x_link_set(bp);
7778         }
7779
7780         return 0;
7781 }
7782
7783 #define PHY_FW_VER_LEN                  10
7784
7785 static void bnx2x_get_drvinfo(struct net_device *dev,
7786                               struct ethtool_drvinfo *info)
7787 {
7788         struct bnx2x *bp = netdev_priv(dev);
7789         u8 phy_fw_ver[PHY_FW_VER_LEN];
7790
7791         strcpy(info->driver, DRV_MODULE_NAME);
7792         strcpy(info->version, DRV_MODULE_VERSION);
7793
7794         phy_fw_ver[0] = '\0';
7795         if (bp->port.pmf) {
7796                 bnx2x_acquire_phy_lock(bp);
7797                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7798                                              (bp->state != BNX2X_STATE_CLOSED),
7799                                              phy_fw_ver, PHY_FW_VER_LEN);
7800                 bnx2x_release_phy_lock(bp);
7801         }
7802
7803         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7804                  (bp->common.bc_ver & 0xff0000) >> 16,
7805                  (bp->common.bc_ver & 0xff00) >> 8,
7806                  (bp->common.bc_ver & 0xff),
7807                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7808         strcpy(info->bus_info, pci_name(bp->pdev));
7809         info->n_stats = BNX2X_NUM_STATS;
7810         info->testinfo_len = BNX2X_NUM_TESTS;
7811         info->eedump_len = bp->common.flash_size;
7812         info->regdump_len = 0;
7813 }
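
/* Editor's note: bc_ver packs one version field per byte, so a
 * hypothetical bc_ver of 0x040200 is formatted above as "BC:4.2.0"
 * ((0x040200 & 0xff0000) >> 16 = 4, (& 0xff00) >> 8 = 2, & 0xff = 0).
 */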
7814
7815 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7816 {
7817         struct bnx2x *bp = netdev_priv(dev);
7818
7819         if (bp->flags & NO_WOL_FLAG) {
7820                 wol->supported = 0;
7821                 wol->wolopts = 0;
7822         } else {
7823                 wol->supported = WAKE_MAGIC;
7824                 if (bp->wol)
7825                         wol->wolopts = WAKE_MAGIC;
7826                 else
7827                         wol->wolopts = 0;
7828         }
7829         memset(&wol->sopass, 0, sizeof(wol->sopass));
7830 }
7831
7832 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7833 {
7834         struct bnx2x *bp = netdev_priv(dev);
7835
7836         if (wol->wolopts & ~WAKE_MAGIC)
7837                 return -EINVAL;
7838
7839         if (wol->wolopts & WAKE_MAGIC) {
7840                 if (bp->flags & NO_WOL_FLAG)
7841                         return -EINVAL;
7842
7843                 bp->wol = 1;
7844         } else
7845                 bp->wol = 0;
7846
7847         return 0;
7848 }
7849
7850 static u32 bnx2x_get_msglevel(struct net_device *dev)
7851 {
7852         struct bnx2x *bp = netdev_priv(dev);
7853
7854         return bp->msglevel;
7855 }
7856
7857 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7858 {
7859         struct bnx2x *bp = netdev_priv(dev);
7860
7861         if (capable(CAP_NET_ADMIN))
7862                 bp->msglevel = level;
7863 }
7864
7865 static int bnx2x_nway_reset(struct net_device *dev)
7866 {
7867         struct bnx2x *bp = netdev_priv(dev);
7868
7869         if (!bp->port.pmf)
7870                 return 0;
7871
7872         if (netif_running(dev)) {
7873                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7874                 bnx2x_link_set(bp);
7875         }
7876
7877         return 0;
7878 }
7879
7880 static int bnx2x_get_eeprom_len(struct net_device *dev)
7881 {
7882         struct bnx2x *bp = netdev_priv(dev);
7883
7884         return bp->common.flash_size;
7885 }
7886
7887 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7888 {
7889         int port = BP_PORT(bp);
7890         int count, i;
7891         u32 val = 0;
7892
7893         /* adjust timeout for emulation/FPGA */
7894         count = NVRAM_TIMEOUT_COUNT;
7895         if (CHIP_REV_IS_SLOW(bp))
7896                 count *= 100;
7897
7898         /* request access to nvram interface */
7899         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7900                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7901
7902         for (i = 0; i < count*10; i++) {
7903                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7904                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7905                         break;
7906
7907                 udelay(5);
7908         }
7909
7910         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7911                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7912                 return -EBUSY;
7913         }
7914
7915         return 0;
7916 }
7917
7918 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7919 {
7920         int port = BP_PORT(bp);
7921         int count, i;
7922         u32 val = 0;
7923
7924         /* adjust timeout for emulation/FPGA */
7925         count = NVRAM_TIMEOUT_COUNT;
7926         if (CHIP_REV_IS_SLOW(bp))
7927                 count *= 100;
7928
7929         /* relinquish nvram interface */
7930         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7931                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7932
7933         for (i = 0; i < count*10; i++) {
7934                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7935                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7936                         break;
7937
7938                 udelay(5);
7939         }
7940
7941         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7942                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7943                 return -EBUSY;
7944         }
7945
7946         return 0;
7947 }
7948
7949 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7950 {
7951         u32 val;
7952
7953         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7954
7955         /* enable both bits, even on read */
7956         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7957                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7958                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7959 }
7960
7961 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7962 {
7963         u32 val;
7964
7965         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7966
7967         /* disable both bits, even after read */
7968         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7969                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7970                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7971 }
7972
7973 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7974                                   u32 cmd_flags)
7975 {
7976         int count, i, rc;
7977         u32 val;
7978
7979         /* build the command word */
7980         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7981
7982         /* need to clear DONE bit separately */
7983         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7984
7985         /* address of the NVRAM to read from */
7986         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7987                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7988
7989         /* issue a read command */
7990         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7991
7992         /* adjust timeout for emulation/FPGA */
7993         count = NVRAM_TIMEOUT_COUNT;
7994         if (CHIP_REV_IS_SLOW(bp))
7995                 count *= 100;
7996
7997         /* wait for completion */
7998         *ret_val = 0;
7999         rc = -EBUSY;
8000         for (i = 0; i < count; i++) {
8001                 udelay(5);
8002                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8003
8004                 if (val & MCPR_NVM_COMMAND_DONE) {
8005                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8006                         /* we read nvram data in cpu order,
8007                          * but ethtool sees it as an array of bytes;
8008                          * converting to big-endian does the work */
8009                         val = cpu_to_be32(val);
8010                         *ret_val = val;
8011                         rc = 0;
8012                         break;
8013                 }
8014         }
8015
8016         return rc;
8017 }
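
#if 0	/* Editor's sketch (illustrative only, not driver code): the
	 * byte-order effect of the conversion above.  On a little-endian
	 * CPU a hypothetical NVRAM dword 0x11223344 read in CPU order
	 * sits in memory as 44 33 22 11; after cpu_to_be32() the bytes
	 * handed to ethtool are 11 22 33 44, matching NVRAM byte order.
	 * On big-endian CPUs the conversion is a no-op. */
static void example_nvram_byte_order(void)
{
	u32 val = cpu_to_be32(0x11223344);
	u8 buf[4];

	memcpy(buf, &val, 4);	/* buf[] = { 0x11, 0x22, 0x33, 0x44 } */
}
#endif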
8018
8019 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8020                             int buf_size)
8021 {
8022         int rc;
8023         u32 cmd_flags;
8024         u32 val;
8025
8026         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8027                 DP(BNX2X_MSG_NVM,
8028                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8029                    offset, buf_size);
8030                 return -EINVAL;
8031         }
8032
8033         if (offset + buf_size > bp->common.flash_size) {
8034                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8035                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8036                    offset, buf_size, bp->common.flash_size);
8037                 return -EINVAL;
8038         }
8039
8040         /* request access to nvram interface */
8041         rc = bnx2x_acquire_nvram_lock(bp);
8042         if (rc)
8043                 return rc;
8044
8045         /* enable access to nvram interface */
8046         bnx2x_enable_nvram_access(bp);
8047
8048         /* read the first word(s) */
8049         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8050         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8051                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8052                 memcpy(ret_buf, &val, 4);
8053
8054                 /* advance to the next dword */
8055                 offset += sizeof(u32);
8056                 ret_buf += sizeof(u32);
8057                 buf_size -= sizeof(u32);
8058                 cmd_flags = 0;
8059         }
8060
8061         if (rc == 0) {
8062                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8063                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8064                 memcpy(ret_buf, &val, 4);
8065         }
8066
8067         /* disable access to nvram interface */
8068         bnx2x_disable_nvram_access(bp);
8069         bnx2x_release_nvram_lock(bp);
8070
8071         return rc;
8072 }
8073
8074 static int bnx2x_get_eeprom(struct net_device *dev,
8075                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8076 {
8077         struct bnx2x *bp = netdev_priv(dev);
8078         int rc;
8079
8080         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8081            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8082            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8083            eeprom->len, eeprom->len);
8084
8085         /* parameters already validated in ethtool_get_eeprom */
8086
8087         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8088
8089         return rc;
8090 }
8091
8092 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8093                                    u32 cmd_flags)
8094 {
8095         int count, i, rc;
8096
8097         /* build the command word */
8098         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8099
8100         /* need to clear DONE bit separately */
8101         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8102
8103         /* write the data */
8104         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8105
8106         /* address of the NVRAM to write to */
8107         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8108                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8109
8110         /* issue the write command */
8111         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8112
8113         /* adjust timeout for emulation/FPGA */
8114         count = NVRAM_TIMEOUT_COUNT;
8115         if (CHIP_REV_IS_SLOW(bp))
8116                 count *= 100;
8117
8118         /* wait for completion */
8119         rc = -EBUSY;
8120         for (i = 0; i < count; i++) {
8121                 udelay(5);
8122                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8123                 if (val & MCPR_NVM_COMMAND_DONE) {
8124                         rc = 0;
8125                         break;
8126                 }
8127         }
8128
8129         return rc;
8130 }
8131
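/* shift (in bits) of byte 'offset & 3' within its dword,
 * e.g. offset 0x103 selects byte 3 and yields a shift of 24 */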
8132 #define BYTE_OFFSET(offset)             (8 * ((offset) & 0x03))
8133
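/* write a single byte by reading the containing dword, patching the byte
 * in place and writing the dword back as one FIRST|LAST access cycle */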
8134 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8135                               int buf_size)
8136 {
8137         int rc;
8138         u32 cmd_flags;
8139         u32 align_offset;
8140         u32 val;
8141
8142         if (offset + buf_size > bp->common.flash_size) {
8143                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8144                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8145                    offset, buf_size, bp->common.flash_size);
8146                 return -EINVAL;
8147         }
8148
8149         /* request access to nvram interface */
8150         rc = bnx2x_acquire_nvram_lock(bp);
8151         if (rc)
8152                 return rc;
8153
8154         /* enable access to nvram interface */
8155         bnx2x_enable_nvram_access(bp);
8156
8157         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8158         align_offset = (offset & ~0x03);
8159         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8160
8161         if (rc == 0) {
8162                 val &= ~(0xff << BYTE_OFFSET(offset));
8163                 val |= (*data_buf << BYTE_OFFSET(offset));
8164
8165                 /* nvram data is returned as an array of bytes
8166                  * convert it back to cpu order */
8167                 val = be32_to_cpu(val);
8168
8169                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8170                                              cmd_flags);
8171         }
8172
8173         /* disable access to nvram interface */
8174         bnx2x_disable_nvram_access(bp);
8175         bnx2x_release_nvram_lock(bp);
8176
8177         return rc;
8178 }
8179
8180 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8181                              int buf_size)
8182 {
8183         int rc;
8184         u32 cmd_flags;
8185         u32 val;
8186         u32 written_so_far;
8187
8188         if (buf_size == 1)      /* ethtool may write a single byte */
8189                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8190
8191         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8192                 DP(BNX2X_MSG_NVM,
8193                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8194                    offset, buf_size);
8195                 return -EINVAL;
8196         }
8197
8198         if (offset + buf_size > bp->common.flash_size) {
8199                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8200                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8201                    offset, buf_size, bp->common.flash_size);
8202                 return -EINVAL;
8203         }
8204
8205         /* request access to nvram interface */
8206         rc = bnx2x_acquire_nvram_lock(bp);
8207         if (rc)
8208                 return rc;
8209
8210         /* enable access to nvram interface */
8211         bnx2x_enable_nvram_access(bp);
8212
8213         written_so_far = 0;
8214         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8215         while ((written_so_far < buf_size) && (rc == 0)) {
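                /* close the NVM cycle on the last dword of the buffer or at
                 * a page boundary; reopen it at the start of a new page */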
8216                 if (written_so_far == (buf_size - sizeof(u32)))
8217                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8218                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8219                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8220                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8221                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8222
8223                 memcpy(&val, data_buf, 4);
8224
8225                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8226
8227                 /* advance to the next dword */
8228                 offset += sizeof(u32);
8229                 data_buf += sizeof(u32);
8230                 written_so_far += sizeof(u32);
8231                 cmd_flags = 0;
8232         }
8233
8234         /* disable access to nvram interface */
8235         bnx2x_disable_nvram_access(bp);
8236         bnx2x_release_nvram_lock(bp);
8237
8238         return rc;
8239 }
8240
8241 static int bnx2x_set_eeprom(struct net_device *dev,
8242                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8243 {
8244         struct bnx2x *bp = netdev_priv(dev);
8245         int rc;
8246
8247         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8248            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8249            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8250            eeprom->len, eeprom->len);
8251
8252         /* parameters already validated in ethtool_set_eeprom */
8253
8254         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8255         if (eeprom->magic == 0x00504859) {
8256                 /* Only the PMF can access the PHY */
8257                 if (!bp->port.pmf)
8258                         return -EINVAL;
8259
8260                 bnx2x_acquire_phy_lock(bp);
8261                 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8262                                           bp->link_params.ext_phy_config,
8263                                           (bp->state != BNX2X_STATE_CLOSED),
8264                                           eebuf, eeprom->len);
8265                 if ((bp->state == BNX2X_STATE_OPEN) ||
8266                     (bp->state == BNX2X_STATE_DISABLED)) {
8267                         rc |= bnx2x_link_reset(&bp->link_params,
8268                                                &bp->link_vars);
8269                         rc |= bnx2x_phy_init(&bp->link_params,
8270                                              &bp->link_vars);
8271                 }
8272                 bnx2x_release_phy_lock(bp);
8273
8274         } else
8275                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8276
8277         return rc;
8278 }
8279
8280 static int bnx2x_get_coalesce(struct net_device *dev,
8281                               struct ethtool_coalesce *coal)
8282 {
8283         struct bnx2x *bp = netdev_priv(dev);
8284
8285         memset(coal, 0, sizeof(struct ethtool_coalesce));
8286
8287         coal->rx_coalesce_usecs = bp->rx_ticks;
8288         coal->tx_coalesce_usecs = bp->tx_ticks;
8289
8290         return 0;
8291 }
8292
8293 static int bnx2x_set_coalesce(struct net_device *dev,
8294                               struct ethtool_coalesce *coal)
8295 {
8296         struct bnx2x *bp = netdev_priv(dev);
8297
8298         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8299         if (bp->rx_ticks > 3000)
8300                 bp->rx_ticks = 3000;
8301
8302         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8303         if (bp->tx_ticks > 3000)
8304                 bp->tx_ticks = 3000;
8305
8306         if (netif_running(dev))
8307                 bnx2x_update_coalesce(bp);
8308
8309         return 0;
8310 }
8311
8312 static void bnx2x_get_ringparam(struct net_device *dev,
8313                                 struct ethtool_ringparam *ering)
8314 {
8315         struct bnx2x *bp = netdev_priv(dev);
8316
8317         ering->rx_max_pending = MAX_RX_AVAIL;
8318         ering->rx_mini_max_pending = 0;
8319         ering->rx_jumbo_max_pending = 0;
8320
8321         ering->rx_pending = bp->rx_ring_size;
8322         ering->rx_mini_pending = 0;
8323         ering->rx_jumbo_pending = 0;
8324
8325         ering->tx_max_pending = MAX_TX_AVAIL;
8326         ering->tx_pending = bp->tx_ring_size;
8327 }
8328
8329 static int bnx2x_set_ringparam(struct net_device *dev,
8330                                struct ethtool_ringparam *ering)
8331 {
8332         struct bnx2x *bp = netdev_priv(dev);
8333         int rc = 0;
8334
8335         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8336             (ering->tx_pending > MAX_TX_AVAIL) ||
8337             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8338                 return -EINVAL;
8339
8340         bp->rx_ring_size = ering->rx_pending;
8341         bp->tx_ring_size = ering->tx_pending;
8342
8343         if (netif_running(dev)) {
8344                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8345                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8346         }
8347
8348         return rc;
8349 }
8350
8351 static void bnx2x_get_pauseparam(struct net_device *dev,
8352                                  struct ethtool_pauseparam *epause)
8353 {
8354         struct bnx2x *bp = netdev_priv(dev);
8355
8356         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8357                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8358
8359         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8360                             BNX2X_FLOW_CTRL_RX);
8361         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8362                             BNX2X_FLOW_CTRL_TX);
8363
8364         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8365            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8366            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8367 }
8368
8369 static int bnx2x_set_pauseparam(struct net_device *dev,
8370                                 struct ethtool_pauseparam *epause)
8371 {
8372         struct bnx2x *bp = netdev_priv(dev);
8373
8374         if (IS_E1HMF(bp))
8375                 return 0;
8376
8377         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8378            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8379            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8380
8381         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8382
8383         if (epause->rx_pause)
8384                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8385
8386         if (epause->tx_pause)
8387                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8388
8389         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8390                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8391
8392         if (epause->autoneg) {
8393                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8394                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8395                         return -EINVAL;
8396                 }
8397
8398                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8399                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8400         }
8401
8402         DP(NETIF_MSG_LINK,
8403            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8404
8405         if (netif_running(dev)) {
8406                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8407                 bnx2x_link_set(bp);
8408         }
8409
8410         return 0;
8411 }
8412
8413 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8414 {
8415         struct bnx2x *bp = netdev_priv(dev);
8416         int changed = 0;
8417         int rc = 0;
8418
8419         /* TPA requires Rx CSUM offloading */
8420         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8421                 if (!(dev->features & NETIF_F_LRO)) {
8422                         dev->features |= NETIF_F_LRO;
8423                         bp->flags |= TPA_ENABLE_FLAG;
8424                         changed = 1;
8425                 }
8426
8427         } else if (dev->features & NETIF_F_LRO) {
8428                 dev->features &= ~NETIF_F_LRO;
8429                 bp->flags &= ~TPA_ENABLE_FLAG;
8430                 changed = 1;
8431         }
8432
8433         if (changed && netif_running(dev)) {
8434                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8435                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8436         }
8437
8438         return rc;
8439 }
8440
8441 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8442 {
8443         struct bnx2x *bp = netdev_priv(dev);
8444
8445         return bp->rx_csum;
8446 }
8447
8448 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8449 {
8450         struct bnx2x *bp = netdev_priv(dev);
8451         int rc = 0;
8452
8453         bp->rx_csum = data;
8454
8455         /* Disable TPA when Rx CSUM is disabled; otherwise all
8456            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8457         if (!data) {
8458                 u32 flags = ethtool_op_get_flags(dev);
8459
8460                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8461         }
8462
8463         return rc;
8464 }
8465
8466 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8467 {
8468         if (data) {
8469                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8470                 dev->features |= NETIF_F_TSO6;
8471         } else {
8472                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8473                 dev->features &= ~NETIF_F_TSO6;
8474         }
8475
8476         return 0;
8477 }
8478
8479 static const struct {
8480         char string[ETH_GSTRING_LEN];
8481 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8482         { "register_test (offline)" },
8483         { "memory_test (offline)" },
8484         { "loopback_test (offline)" },
8485         { "nvram_test (online)" },
8486         { "interrupt_test (online)" },
8487         { "link_test (online)" },
8488         { "idle check (online)" },
8489         { "MC errors (online)" }
8490 };
8491
8492 static int bnx2x_self_test_count(struct net_device *dev)
8493 {
8494         return BNX2X_NUM_TESTS;
8495 }
8496
8497 static int bnx2x_test_registers(struct bnx2x *bp)
8498 {
8499         int idx, i, rc = -ENODEV;
8500         u32 wr_val = 0;
8501         int port = BP_PORT(bp);
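        /* offset0 is the port 0 register, offset1 the per-port stride and
         * mask selects the bits that must read back exactly as written */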
8502         static const struct {
8503                 u32  offset0;
8504                 u32  offset1;
8505                 u32  mask;
8506         } reg_tbl[] = {
8507 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8508                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8509                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8510                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8511                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8512                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8513                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8514                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8515                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8516                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8517 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8518                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8519                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8520                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8521                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8522                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8523                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8524                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8525                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8526                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8527 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8528                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8529                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8530                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8531                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8532                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8533                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8534                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8535                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8536                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8537 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8538                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8539                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8540                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8541                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8542                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8543                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8544                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8545
8546                 { 0xffffffff, 0, 0x00000000 }
8547         };
8548
8549         if (!netif_running(bp->dev))
8550                 return rc;
8551
8552         /* Run the test twice:
8553            first writing 0x00000000, then writing 0xffffffff */
8554         for (idx = 0; idx < 2; idx++) {
8555
8556                 switch (idx) {
8557                 case 0:
8558                         wr_val = 0;
8559                         break;
8560                 case 1:
8561                         wr_val = 0xffffffff;
8562                         break;
8563                 }
8564
8565                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8566                         u32 offset, mask, save_val, val;
8567
8568                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8569                         mask = reg_tbl[i].mask;
8570
8571                         save_val = REG_RD(bp, offset);
8572
8573                         REG_WR(bp, offset, wr_val);
8574                         val = REG_RD(bp, offset);
8575
8576                         /* Restore the original register's value */
8577                         REG_WR(bp, offset, save_val);
8578
8579                         /* verify the value is as expected */
8580                         if ((val & mask) != (wr_val & mask))
8581                                 goto test_reg_exit;
8582                 }
8583         }
8584
8585         rc = 0;
8586
8587 test_reg_exit:
8588         return rc;
8589 }
8590
8591 static int bnx2x_test_memory(struct bnx2x *bp)
8592 {
8593         int i, j, rc = -ENODEV;
8594         u32 val;
8595         static const struct {
8596                 u32 offset;
8597                 int size;
8598         } mem_tbl[] = {
8599                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8600                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8601                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8602                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8603                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8604                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8605                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8606
8607                 { 0xffffffff, 0 }
8608         };
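        /* parity status registers with the bits that may legitimately be
         * set on each chip revision (E1/E1H) and are masked out below */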
8609         static const struct {
8610                 char *name;
8611                 u32 offset;
8612                 u32 e1_mask;
8613                 u32 e1h_mask;
8614         } prty_tbl[] = {
8615                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8616                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8617                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8618                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8619                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8620                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8621
8622                 { NULL, 0xffffffff, 0, 0 }
8623         };
8624
8625         if (!netif_running(bp->dev))
8626                 return rc;
8627
8628         /* Go through all the memories */
8629         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8630                 for (j = 0; j < mem_tbl[i].size; j++)
8631                         REG_RD(bp, mem_tbl[i].offset + j*4);
8632
8633         /* Check the parity status */
8634         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8635                 val = REG_RD(bp, prty_tbl[i].offset);
8636                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8637                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8638                         DP(NETIF_MSG_HW,
8639                            "%s is 0x%x\n", prty_tbl[i].name, val);
8640                         goto test_mem_exit;
8641                 }
8642         }
8643
8644         rc = 0;
8645
8646 test_mem_exit:
8647         return rc;
8648 }
8649
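/* poll for up to 10 seconds (1000 * 10ms) until the link comes back up */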
8650 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8651 {
8652         int cnt = 1000;
8653
8654         if (link_up)
8655                 while (bnx2x_link_test(bp) && cnt--)
8656                         msleep(10);
8657 }
8658
8659 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8660 {
8661         unsigned int pkt_size, num_pkts, i;
8662         struct sk_buff *skb;
8663         unsigned char *packet;
8664         struct bnx2x_fastpath *fp = &bp->fp[0];
8665         u16 tx_start_idx, tx_idx;
8666         u16 rx_start_idx, rx_idx;
8667         u16 pkt_prod;
8668         struct sw_tx_bd *tx_buf;
8669         struct eth_tx_bd *tx_bd;
8670         dma_addr_t mapping;
8671         union eth_rx_cqe *cqe;
8672         u8 cqe_fp_flags;
8673         struct sw_rx_bd *rx_buf;
8674         u16 len;
8675         int rc = -ENODEV;
8676
8677         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8678                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8679                 bnx2x_acquire_phy_lock(bp);
8680                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8681                 bnx2x_release_phy_lock(bp);
8682
8683         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8684                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8685                 bnx2x_acquire_phy_lock(bp);
8686                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8687                 bnx2x_release_phy_lock(bp);
8688                 /* wait until link state is restored */
8689                 bnx2x_wait_for_link(bp, link_up);
8690
8691         } else
8692                 return -EINVAL;
8693
8694         pkt_size = 1514;
8695         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8696         if (!skb) {
8697                 rc = -ENOMEM;
8698                 goto test_loopback_exit;
8699         }
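        /* build the test frame: destination MAC is our own address, the
         * rest of the Ethernet header is zeroed and the payload carries a
         * counting pattern that the Rx path below verifies byte by byte */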
8700         packet = skb_put(skb, pkt_size);
8701         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8702         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8703         for (i = ETH_HLEN; i < pkt_size; i++)
8704                 packet[i] = (unsigned char) (i & 0xff);
8705
8706         num_pkts = 0;
8707         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8708         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8709
8710         pkt_prod = fp->tx_pkt_prod++;
8711         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8712         tx_buf->first_bd = fp->tx_bd_prod;
8713         tx_buf->skb = skb;
8714
8715         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8716         mapping = pci_map_single(bp->pdev, skb->data,
8717                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8718         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8719         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8720         tx_bd->nbd = cpu_to_le16(1);
8721         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8722         tx_bd->vlan = cpu_to_le16(pkt_prod);
8723         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8724                                        ETH_TX_BD_FLAGS_END_BD);
8725         tx_bd->general_data = ((UNICAST_ADDRESS <<
8726                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8727
8728         fp->hw_tx_prods->bds_prod =
8729                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8730         mb(); /* FW restriction: must not reorder writing nbd and packets */
8731         fp->hw_tx_prods->packets_prod =
8732                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8733         DOORBELL(bp, FP_IDX(fp), 0);
8734
8735         mmiowb();
8736
8737         num_pkts++;
8738         fp->tx_bd_prod++;
8739         bp->dev->trans_start = jiffies;
8740
8741         udelay(100);
8742
8743         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8744         if (tx_idx != tx_start_idx + num_pkts)
8745                 goto test_loopback_exit;
8746
8747         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8748         if (rx_idx != rx_start_idx + num_pkts)
8749                 goto test_loopback_exit;
8750
8751         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8752         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8753         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8754                 goto test_loopback_rx_exit;
8755
8756         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8757         if (len != pkt_size)
8758                 goto test_loopback_rx_exit;
8759
8760         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8761         skb = rx_buf->skb;
8762         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8763         for (i = ETH_HLEN; i < pkt_size; i++)
8764                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8765                         goto test_loopback_rx_exit;
8766
8767         rc = 0;
8768
8769 test_loopback_rx_exit:
8770
8771         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8772         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8773         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8774         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8775
8776         /* Update producers */
8777         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8778                              fp->rx_sge_prod);
8779         mmiowb(); /* keep prod updates ordered */
8780
8781 test_loopback_exit:
8782         bp->link_params.loopback_mode = LOOPBACK_NONE;
8783
8784         return rc;
8785 }
8786
8787 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8788 {
8789         int rc = 0;
8790
8791         if (!netif_running(bp->dev))
8792                 return BNX2X_LOOPBACK_FAILED;
8793
8794         bnx2x_netif_stop(bp, 1);
8795
8796         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8797                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8798                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8799         }
8800
8801         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8802                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8803                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8804         }
8805
8806         bnx2x_netif_start(bp);
8807
8808         return rc;
8809 }
8810
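/* the CRC32 of a block that includes its own stored CRC always leaves
 * this well-known constant residual */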
8811 #define CRC32_RESIDUAL                  0xdebb20e3
8812
8813 static int bnx2x_test_nvram(struct bnx2x *bp)
8814 {
8815         static const struct {
8816                 int offset;
8817                 int size;
8818         } nvram_tbl[] = {
8819                 {     0,  0x14 }, /* bootstrap */
8820                 {  0x14,  0xec }, /* dir */
8821                 { 0x100, 0x350 }, /* manuf_info */
8822                 { 0x450,  0xf0 }, /* feature_info */
8823                 { 0x640,  0x64 }, /* upgrade_key_info */
8824                 { 0x6a4,  0x64 },
8825                 { 0x708,  0x70 }, /* manuf_key_info */
8826                 { 0x778,  0x70 },
8827                 {     0,     0 }
8828         };
8829         u32 buf[0x350 / 4];
8830         u8 *data = (u8 *)buf;
8831         int i, rc;
8832         u32 magic, csum;
8833
8834         rc = bnx2x_nvram_read(bp, 0, data, 4);
8835         if (rc) {
8836                 DP(NETIF_MSG_PROBE, "magic value read failed (rc -%d)\n", -rc);
8837                 goto test_nvram_exit;
8838         }
8839
8840         magic = be32_to_cpu(buf[0]);
8841         if (magic != 0x669955aa) {
8842                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8843                 rc = -ENODEV;
8844                 goto test_nvram_exit;
8845         }
8846
8847         for (i = 0; nvram_tbl[i].size; i++) {
8848
8849                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8850                                       nvram_tbl[i].size);
8851                 if (rc) {
8852                         DP(NETIF_MSG_PROBE,
8853                            "nvram_tbl[%d] data read failed (rc -%d)\n", i, -rc);
8854                         goto test_nvram_exit;
8855                 }
8856
8857                 csum = ether_crc_le(nvram_tbl[i].size, data);
8858                 if (csum != CRC32_RESIDUAL) {
8859                         DP(NETIF_MSG_PROBE,
8860                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8861                         rc = -ENODEV;
8862                         goto test_nvram_exit;
8863                 }
8864         }
8865
8866 test_nvram_exit:
8867         return rc;
8868 }
8869
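/* post an empty SET_MAC ramrod (zero entries) and wait up to ~100ms for
 * its completion event; receiving it proves slowpath interrupts work */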
8870 static int bnx2x_test_intr(struct bnx2x *bp)
8871 {
8872         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8873         int i, rc;
8874
8875         if (!netif_running(bp->dev))
8876                 return -ENODEV;
8877
8878         config->hdr.length_6b = 0;
8879         config->hdr.offset = 0;
8880         config->hdr.client_id = BP_CL_ID(bp);
8881         config->hdr.reserved1 = 0;
8882
8883         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8884                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8885                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8886         if (rc == 0) {
8887                 bp->set_mac_pending++;
8888                 for (i = 0; i < 10; i++) {
8889                         if (!bp->set_mac_pending)
8890                                 break;
8891                         msleep_interruptible(10);
8892                 }
8893                 if (i == 10)
8894                         rc = -ENODEV;
8895         }
8896
8897         return rc;
8898 }
8899
8900 static void bnx2x_self_test(struct net_device *dev,
8901                             struct ethtool_test *etest, u64 *buf)
8902 {
8903         struct bnx2x *bp = netdev_priv(dev);
8904
8905         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8906
8907         if (!netif_running(dev))
8908                 return;
8909
8910         /* offline tests are not supported in MF mode */
8911         if (IS_E1HMF(bp))
8912                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8913
8914         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8915                 u8 link_up;
8916
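                /* offline tests reload the chip in diagnostic mode and
                 * restore normal mode (and the link) when they are done */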
8917                 link_up = bp->link_vars.link_up;
8918                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8919                 bnx2x_nic_load(bp, LOAD_DIAG);
8920                 /* wait until link state is restored */
8921                 bnx2x_wait_for_link(bp, link_up);
8922
8923                 if (bnx2x_test_registers(bp) != 0) {
8924                         buf[0] = 1;
8925                         etest->flags |= ETH_TEST_FL_FAILED;
8926                 }
8927                 if (bnx2x_test_memory(bp) != 0) {
8928                         buf[1] = 1;
8929                         etest->flags |= ETH_TEST_FL_FAILED;
8930                 }
8931                 buf[2] = bnx2x_test_loopback(bp, link_up);
8932                 if (buf[2] != 0)
8933                         etest->flags |= ETH_TEST_FL_FAILED;
8934
8935                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8936                 bnx2x_nic_load(bp, LOAD_NORMAL);
8937                 /* wait until link state is restored */
8938                 bnx2x_wait_for_link(bp, link_up);
8939         }
8940         if (bnx2x_test_nvram(bp) != 0) {
8941                 buf[3] = 1;
8942                 etest->flags |= ETH_TEST_FL_FAILED;
8943         }
8944         if (bnx2x_test_intr(bp) != 0) {
8945                 buf[4] = 1;
8946                 etest->flags |= ETH_TEST_FL_FAILED;
8947         }
8948         if (bp->port.pmf)
8949                 if (bnx2x_link_test(bp) != 0) {
8950                         buf[5] = 1;
8951                         etest->flags |= ETH_TEST_FL_FAILED;
8952                 }
8953         buf[7] = bnx2x_mc_assert(bp);
8954         if (buf[7] != 0)
8955                 etest->flags |= ETH_TEST_FL_FAILED;
8956
8957 #ifdef BNX2X_EXTRA_DEBUG
8958         bnx2x_panic_dump(bp);
8959 #endif
8960 }
8961
8962 static const struct {
8963         long offset;
8964         int size;
8965         u32 flags;
8966 #define STATS_FLAGS_PORT                1
8967 #define STATS_FLAGS_FUNC                2
8968         u8 string[ETH_GSTRING_LEN];
8969 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8970 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8971                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8972         { STATS_OFFSET32(error_bytes_received_hi),
8973                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8974         { STATS_OFFSET32(total_bytes_transmitted_hi),
8975                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8976         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8977                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8978         { STATS_OFFSET32(total_unicast_packets_received_hi),
8979                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8980         { STATS_OFFSET32(total_multicast_packets_received_hi),
8981                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8982         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8983                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8984         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8985                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8986         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8987                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8988 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8989                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8990         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8991                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8992         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8993                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
8994         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8995                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8996         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8997                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8998         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8999                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9000         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9001                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9002         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9003                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9004         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9005                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9006         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9007                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9008 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9009                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9010         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9011                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9012         { STATS_OFFSET32(jabber_packets_received),
9013                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9014         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9015                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9016         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9017                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9018         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9019                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9020         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9021                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9022         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9023                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9024         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9025                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9026         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9027                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9028 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9029                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9030         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9031                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9032         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9033                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9034         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9035                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9036         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9037                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9038         { STATS_OFFSET32(mac_filter_discard),
9039                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9040         { STATS_OFFSET32(no_buff_discard),
9041                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9042         { STATS_OFFSET32(xxoverflow_discard),
9043                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9044         { STATS_OFFSET32(brb_drop_hi),
9045                                 8, STATS_FLAGS_PORT, "brb_discard" },
9046         { STATS_OFFSET32(brb_truncate_hi),
9047                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9048 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9049                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9050         { STATS_OFFSET32(rx_skb_alloc_failed),
9051                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9052 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9053                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9054 };
9055
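/* in E1H multi-function mode only per-function counters are reported;
 * port statistics (STATS_FLAGS_PORT) are filtered out */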
9056 #define IS_NOT_E1HMF_STAT(bp, i) \
9057                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9058
9059 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9060 {
9061         struct bnx2x *bp = netdev_priv(dev);
9062         int i, j;
9063
9064         switch (stringset) {
9065         case ETH_SS_STATS:
9066                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9067                         if (IS_NOT_E1HMF_STAT(bp, i))
9068                                 continue;
9069                         strcpy(buf + j*ETH_GSTRING_LEN,
9070                                bnx2x_stats_arr[i].string);
9071                         j++;
9072                 }
9073                 break;
9074
9075         case ETH_SS_TEST:
9076                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9077                 break;
9078         }
9079 }
9080
9081 static int bnx2x_get_stats_count(struct net_device *dev)
9082 {
9083         struct bnx2x *bp = netdev_priv(dev);
9084         int i, num_stats = 0;
9085
9086         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9087                 if (IS_NOT_E1HMF_STAT(bp, i))
9088                         continue;
9089                 num_stats++;
9090         }
9091         return num_stats;
9092 }
9093
9094 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9095                                     struct ethtool_stats *stats, u64 *buf)
9096 {
9097         struct bnx2x *bp = netdev_priv(dev);
9098         u32 *hw_stats = (u32 *)&bp->eth_stats;
9099         int i, j;
9100
9101         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9102                 if (IS_NOT_E1HMF_STAT(bp, i))
9103                         continue;
9104
9105                 if (bnx2x_stats_arr[i].size == 0) {
9106                         /* skip this counter */
9107                         buf[j] = 0;
9108                         j++;
9109                         continue;
9110                 }
9111                 if (bnx2x_stats_arr[i].size == 4) {
9112                         /* 4-byte counter */
9113                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9114                         j++;
9115                         continue;
9116                 }
9117                 /* 8-byte counter */
9118                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9119                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9120                 j++;
9121         }
9122 }
9123
9124 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9125 {
9126         struct bnx2x *bp = netdev_priv(dev);
9127         int port = BP_PORT(bp);
9128         int i;
9129
9130         if (!netif_running(dev))
9131                 return 0;
9132
9133         if (!bp->port.pmf)
9134                 return 0;
9135
9136         if (data == 0)
9137                 data = 2;
9138
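        /* blink for 'data' seconds: toggle every 500ms between the
         * operational LED mode and off */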
9139         for (i = 0; i < (data * 2); i++) {
9140                 if ((i % 2) == 0)
9141                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9142                                       bp->link_params.hw_led_mode,
9143                                       bp->link_params.chip_id);
9144                 else
9145                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9146                                       bp->link_params.hw_led_mode,
9147                                       bp->link_params.chip_id);
9148
9149                 msleep_interruptible(500);
9150                 if (signal_pending(current))
9151                         break;
9152         }
9153
9154         if (bp->link_vars.link_up)
9155                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9156                               bp->link_vars.line_speed,
9157                               bp->link_params.hw_led_mode,
9158                               bp->link_params.chip_id);
9159
9160         return 0;
9161 }
9162
9163 static struct ethtool_ops bnx2x_ethtool_ops = {
9164         .get_settings           = bnx2x_get_settings,
9165         .set_settings           = bnx2x_set_settings,
9166         .get_drvinfo            = bnx2x_get_drvinfo,
9167         .get_wol                = bnx2x_get_wol,
9168         .set_wol                = bnx2x_set_wol,
9169         .get_msglevel           = bnx2x_get_msglevel,
9170         .set_msglevel           = bnx2x_set_msglevel,
9171         .nway_reset             = bnx2x_nway_reset,
9172         .get_link               = ethtool_op_get_link,
9173         .get_eeprom_len         = bnx2x_get_eeprom_len,
9174         .get_eeprom             = bnx2x_get_eeprom,
9175         .set_eeprom             = bnx2x_set_eeprom,
9176         .get_coalesce           = bnx2x_get_coalesce,
9177         .set_coalesce           = bnx2x_set_coalesce,
9178         .get_ringparam          = bnx2x_get_ringparam,
9179         .set_ringparam          = bnx2x_set_ringparam,
9180         .get_pauseparam         = bnx2x_get_pauseparam,
9181         .set_pauseparam         = bnx2x_set_pauseparam,
9182         .get_rx_csum            = bnx2x_get_rx_csum,
9183         .set_rx_csum            = bnx2x_set_rx_csum,
9184         .get_tx_csum            = ethtool_op_get_tx_csum,
9185         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9186         .set_flags              = bnx2x_set_flags,
9187         .get_flags              = ethtool_op_get_flags,
9188         .get_sg                 = ethtool_op_get_sg,
9189         .set_sg                 = ethtool_op_set_sg,
9190         .get_tso                = ethtool_op_get_tso,
9191         .set_tso                = bnx2x_set_tso,
9192         .self_test_count        = bnx2x_self_test_count,
9193         .self_test              = bnx2x_self_test,
9194         .get_strings            = bnx2x_get_strings,
9195         .phys_id                = bnx2x_phys_id,
9196         .get_stats_count        = bnx2x_get_stats_count,
9197         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9198 };
9199
9200 /* end of ethtool_ops */
9201
9202 /****************************************************************************
9203 * General service functions
9204 ****************************************************************************/
9205
9206 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9207 {
9208         u16 pmcsr;
9209
9210         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9211
9212         switch (state) {
9213         case PCI_D0:
9214                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9215                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9216                                        PCI_PM_CTRL_PME_STATUS));
9217
9218                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9219                         /* delay required during transition out of D3hot */
9220                         msleep(20);
9221                 break;
9222
9223         case PCI_D3hot:
9224                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
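                /* state field value 3 selects D3hot */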
9225                 pmcsr |= 3;
9226
9227                 if (bp->wol)
9228                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9229
9230                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9231                                       pmcsr);
9232
9233                 /* No more memory access after this point until
9234                  * the device is brought back to D0.
9235                  */
9236                 break;
9237
9238         default:
9239                 return -EINVAL;
9240         }
9241         return 0;
9242 }
9243
9244 /*
9245  * net_device service functions
9246  */
9247
9248 static int bnx2x_poll(struct napi_struct *napi, int budget)
9249 {
9250         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9251                                                  napi);
9252         struct bnx2x *bp = fp->bp;
9253         int work_done = 0;
9254         u16 rx_cons_sb;
9255
9256 #ifdef BNX2X_STOP_ON_ERROR
9257         if (unlikely(bp->panic))
9258                 goto poll_panic;
9259 #endif
9260
9261         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9262         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9263         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9264
9265         bnx2x_update_fpsb_idx(fp);
9266
9267         if (BNX2X_HAS_TX_WORK(fp))
9268                 bnx2x_tx_int(fp, budget);
9269
9270         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9271         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9272                 rx_cons_sb++;
9273         if (BNX2X_HAS_RX_WORK(fp))
9274                 work_done = bnx2x_rx_int(fp, budget);
9275
9276         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9277         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9278         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9279                 rx_cons_sb++;
9280
9281         /* must not complete if we consumed full budget */
9282         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9283
9284 #ifdef BNX2X_STOP_ON_ERROR
9285 poll_panic:
9286 #endif
9287                 netif_rx_complete(bp->dev, napi);
9288
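                /* ack both storm indices; only the final CSTORM ack
                 * (IGU_INT_ENABLE) re-enables the interrupt line */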
9289                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9290                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9291                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9292                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9293         }
9294         return work_done;
9295 }
9296
9297
9298 /* We split the first BD into header and data BDs
9299  * to ease the pain of our fellow microcode engineers.
9300  * We use one mapping for both BDs.
9301  * So far this has only been observed to happen
9302  * in Other Operating Systems(TM).
9303  */
9304 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9305                                    struct bnx2x_fastpath *fp,
9306                                    struct eth_tx_bd **tx_bd, u16 hlen,
9307                                    u16 bd_prod, int nbd)
9308 {
9309         struct eth_tx_bd *h_tx_bd = *tx_bd;
9310         struct eth_tx_bd *d_tx_bd;
9311         dma_addr_t mapping;
9312         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9313
9314         /* first fix first BD */
9315         h_tx_bd->nbd = cpu_to_le16(nbd);
9316         h_tx_bd->nbytes = cpu_to_le16(hlen);
9317
9318         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9319            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9320            h_tx_bd->addr_lo, h_tx_bd->nbd);
9321
9322         /* now get a new data BD
9323          * (after the pbd) and fill it */
9324         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9325         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9326
9327         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9328                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9329
9330         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9331         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9332         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9333         d_tx_bd->vlan = 0;
9334         /* this marks the BD as one that has no individual mapping
9335          * the FW ignores this flag in a BD not marked start
9336          */
9337         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9338         DP(NETIF_MSG_TX_QUEUED,
9339            "TSO split data size is %d (%x:%x)\n",
9340            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9341
9342         /* update tx_bd for marking the last BD flag */
9343         *tx_bd = d_tx_bd;
9344
9345         return bd_prod;
9346 }
9347
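/* fix up a checksum after the transport header moved by 'fix' bytes:
 * fold out the bytes that no longer precede the header (fix > 0) or fold
 * in the newly covered ones (fix < 0), then byte-swap the folded result */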
9348 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9349 {
9350         if (fix > 0)
9351                 csum = (u16) ~csum_fold(csum_sub(csum,
9352                                 csum_partial(t_header - fix, fix, 0)));
9353
9354         else if (fix < 0)
9355                 csum = (u16) ~csum_fold(csum_add(csum,
9356                                 csum_partial(t_header, -fix, 0)));
9357
9358         return swab16(csum);
9359 }
9360
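/* encode the offloads the stack requested as XMIT_* flags: checksum
 * flavour (IPv4/IPv6, TCP) and GSO flavour, consumed by the Tx path */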
9361 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9362 {
9363         u32 rc;
9364
9365         if (skb->ip_summed != CHECKSUM_PARTIAL)
9366                 rc = XMIT_PLAIN;
9367
9368         else {
9369                 if (skb->protocol == htons(ETH_P_IPV6)) {
9370                         rc = XMIT_CSUM_V6;
9371                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9372                                 rc |= XMIT_CSUM_TCP;
9373
9374                 } else {
9375                         rc = XMIT_CSUM_V4;
9376                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9377                                 rc |= XMIT_CSUM_TCP;
9378                 }
9379         }
9380
9381         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9382                 rc |= XMIT_GSO_V4;
9383
9384         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9385                 rc |= XMIT_GSO_V6;
9386
9387         return rc;
9388 }
9389
9390 /* check if packet requires linearization (packet is too fragmented) */
9391 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9392                              u32 xmit_type)
9393 {
9394         int to_copy = 0;
9395         int hlen = 0;
9396         int first_bd_sz = 0;
9397
9398         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9399         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9400
9401                 if (xmit_type & XMIT_GSO) {
9402                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9403                         /* Check if LSO packet needs to be copied:
9404                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9405                         int wnd_size = MAX_FETCH_BD - 3;
9406                         /* Number of windows to check */
9407                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9408                         int wnd_idx = 0;
9409                         int frag_idx = 0;
9410                         u32 wnd_sum = 0;
9411
9412                         /* Headers length */
9413                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9414                                 tcp_hdrlen(skb);
9415
9416                         /* Amount of data (w/o headers) on linear part of SKB*/
9417                         first_bd_sz = skb_headlen(skb) - hlen;
9418
9419                         wnd_sum  = first_bd_sz;
9420
9421                         /* Calculate the first sum - it's special */
9422                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9423                                 wnd_sum +=
9424                                         skb_shinfo(skb)->frags[frag_idx].size;
9425
9426                         /* If the linear part carried payload - check the first window */
9427                         if (first_bd_sz > 0) {
9428                                 if (unlikely(wnd_sum < lso_mss)) {
9429                                         to_copy = 1;
9430                                         goto exit_lbl;
9431                                 }
9432
9433                                 wnd_sum -= first_bd_sz;
9434                         }
9435
9436                         /* The rest are easier: slide the window along the
9437                            frag list and check every position */
9438                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9439                                 wnd_sum +=
9440                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9441
9442                                 if (unlikely(wnd_sum < lso_mss)) {
9443                                         to_copy = 1;
9444                                         break;
9445                                 }
9446                                 wnd_sum -=
9447                                         skb_shinfo(skb)->frags[wnd_idx].size;
9448                         }
9449
9450                 } else {
9451                         /* a non-LSO packet this fragmented must always
9452                            be linearized */
9453                         to_copy = 1;
9454                 }
9455         }
9456
9457 exit_lbl:
9458         if (unlikely(to_copy))
9459                 DP(NETIF_MSG_TX_QUEUED,
9460                    "Linearization IS REQUIRED for %s packet. "
9461                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9462                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9463                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9464
9465         return to_copy;
9466 }
9467
9468 /* called with netif_tx_lock
9469  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9470  * netif_wake_queue()
9471  */
9472 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9473 {
9474         struct bnx2x *bp = netdev_priv(dev);
9475         struct bnx2x_fastpath *fp;
9476         struct sw_tx_bd *tx_buf;
9477         struct eth_tx_bd *tx_bd;
9478         struct eth_tx_parse_bd *pbd = NULL;
9479         u16 pkt_prod, bd_prod;
9480         int nbd, fp_index;
9481         dma_addr_t mapping;
9482         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9483         int vlan_off = (bp->e1hov ? 4 : 0);
9484         int i;
9485         u8 hlen = 0;
9486
9487 #ifdef BNX2X_STOP_ON_ERROR
9488         if (unlikely(bp->panic))
9489                 return NETDEV_TX_BUSY;
9490 #endif
9491
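        /* spread Tx over the fastpath rings by CPU; with a single queue
         * this always picks fp[0]
         */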
9492         fp_index = (smp_processor_id() % bp->num_queues);
9493         fp = &bp->fp[fp_index];
9494
9495         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9496                 bp->eth_stats.driver_xoff++;
9497                 netif_stop_queue(dev);
9498                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9499                 return NETDEV_TX_BUSY;
9500         }
9501
9502         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9503            "  gso type %x  xmit_type %x\n",
9504            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9505            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9506
9507         /* First, check if we need to linearize the skb
9508            (due to FW restrictions) */
9509         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9510                 /* Statistics of linearization */
9511                 bp->lin_cnt++;
9512                 if (skb_linearize(skb) != 0) {
9513                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9514                            "silently dropping this SKB\n");
9515                         dev_kfree_skb_any(skb);
9516                         return NETDEV_TX_OK;
9517                 }
9518         }
9519
9520         /*
9521         Please read carefully. First we use one BD which we mark as the
9522         start BD, then for TSO or checksum offload we add a parsing info
9523         BD, and only then the rest of the data BDs.
9524         (don't forget to mark the last one as last,
9525         and to unmap only AFTER you write to the BD ...)
9526         And above all, all pbd sizes are in 16-bit words - NOT DWORDS!
9527         */
9528
9529         pkt_prod = fp->tx_pkt_prod++;
9530         bd_prod = TX_BD(fp->tx_bd_prod);
9531
9532         /* get a tx_buf and first BD */
9533         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9534         tx_bd = &fp->tx_desc_ring[bd_prod];
9535
9536         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9537         tx_bd->general_data = (UNICAST_ADDRESS <<
9538                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9539         /* header nbd */
9540         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9541
9542         /* remember the first BD of the packet */
9543         tx_buf->first_bd = fp->tx_bd_prod;
9544         tx_buf->skb = skb;
9545
9546         DP(NETIF_MSG_TX_QUEUED,
9547            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9548            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9549
9550         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9551                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9552                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9553                 vlan_off += 4;
9554         } else
9555                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9556
9557         if (xmit_type) {
9558                 /* turn on parsing and get a BD */
9559                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9560                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9561
9562                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9563         }
9564
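        /* note: header lengths written to the parsing BD below are in
         * 16-bit words, hence the divisions by 2
         */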
9565         if (xmit_type & XMIT_CSUM) {
9566                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9567
9568                 /* for now NS flag is not used in Linux */
9569                 pbd->global_data = (hlen |
9570                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9571                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9572
9573                 pbd->ip_hlen = (skb_transport_header(skb) -
9574                                 skb_network_header(skb)) / 2;
9575
9576                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9577
9578                 pbd->total_hlen = cpu_to_le16(hlen);
9579                 hlen = hlen*2 - vlan_off;
9580
9581                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9582
9583                 if (xmit_type & XMIT_CSUM_V4)
9584                         tx_bd->bd_flags.as_bitfield |=
9585                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9586                 else
9587                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9588
9589                 if (xmit_type & XMIT_CSUM_TCP) {
9590                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9591
9592                 } else {
9593                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9594
9595                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9596                         pbd->cs_offset = fix / 2;
9597
9598                         DP(NETIF_MSG_TX_QUEUED,
9599                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9600                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9601                            SKB_CS(skb));
9602
9603                         /* HW bug workaround: fix up the checksum */
9604                         pbd->tcp_pseudo_csum =
9605                                 bnx2x_csum_fix(skb_transport_header(skb),
9606                                                SKB_CS(skb), fix);
9607
9608                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9609                            pbd->tcp_pseudo_csum);
9610                 }
9611         }
9612
9613         mapping = pci_map_single(bp->pdev, skb->data,
9614                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9615
9616         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9617         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
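        /* BD count: the start BD plus one BD per fragment, plus the
         * parsing BD when one was allocated above
         */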
9618         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9619         tx_bd->nbd = cpu_to_le16(nbd);
9620         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9621
9622         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9623            "  nbytes %d  flags %x  vlan %x\n",
9624            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9625            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9626            le16_to_cpu(tx_bd->vlan));
9627
9628         if (xmit_type & XMIT_GSO) {
9629
9630                 DP(NETIF_MSG_TX_QUEUED,
9631                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9632                    skb->len, hlen, skb_headlen(skb),
9633                    skb_shinfo(skb)->gso_size);
9634
9635                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9636
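                /* if the linear part holds payload beyond the headers,
                 * split the first BD so the headers get a BD of their own
                 */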
9637                 if (unlikely(skb_headlen(skb) > hlen))
9638                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9639                                                  bd_prod, ++nbd);
9640
9641                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9642                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9643                 pbd->tcp_flags = pbd_tcp_flags(skb);
9644
9645                 if (xmit_type & XMIT_GSO_V4) {
9646                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9647                         pbd->tcp_pseudo_csum =
9648                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9649                                                           ip_hdr(skb)->daddr,
9650                                                           0, IPPROTO_TCP, 0));
9651
9652                 } else
9653                         pbd->tcp_pseudo_csum =
9654                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9655                                                         &ipv6_hdr(skb)->daddr,
9656                                                         0, IPPROTO_TCP, 0));
9657
9658                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9659         }
9660
9661         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9662                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9663
9664                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9665                 tx_bd = &fp->tx_desc_ring[bd_prod];
9666
9667                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9668                                        frag->size, PCI_DMA_TODEVICE);
9669
9670                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9671                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9672                 tx_bd->nbytes = cpu_to_le16(frag->size);
9673                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9674                 tx_bd->bd_flags.as_bitfield = 0;
9675
9676                 DP(NETIF_MSG_TX_QUEUED,
9677                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9678                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9679                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9680         }
9681
9682         /* finally, mark this BD as the last BD of the packet */
9683         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9684
9685         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9686            tx_bd, tx_bd->bd_flags.as_bitfield);
9687
9688         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9689
9690         /* ring the Tx doorbell; count the next-page-pointer BD too
9691          * when the packet crosses (or ends on) a ring page boundary
9692          */
9693         if (TX_BD_POFF(bd_prod) < nbd)
9694                 nbd++;
9695
9696         if (pbd)
9697                 DP(NETIF_MSG_TX_QUEUED,
9698                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9699                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9700                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9701                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9702                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9703
9704         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9705
9706         fp->hw_tx_prods->bds_prod =
9707                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9708         mb(); /* FW restriction: must not reorder writing nbd and packets */
9709         fp->hw_tx_prods->packets_prod =
9710                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9711         DOORBELL(bp, FP_IDX(fp), 0);
9712
9713         mmiowb();
9714
9715         fp->tx_bd_prod += nbd;
9716         dev->trans_start = jiffies;
9717
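        /* re-check after stopping the queue: bnx2x_tx_int() may have
         * freed BDs in the meantime, in which case wake it right back up
         */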
9718         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9719                 netif_stop_queue(dev);
9720                 bp->eth_stats.driver_xoff++;
9721                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9722                         netif_wake_queue(dev);
9723         }
9724         fp->tx_pkt++;
9725
9726         return NETDEV_TX_OK;
9727 }
9728
9729 /* called with rtnl_lock */
9730 static int bnx2x_open(struct net_device *dev)
9731 {
9732         struct bnx2x *bp = netdev_priv(dev);
9733
9734         bnx2x_set_power_state(bp, PCI_D0);
9735
9736         return bnx2x_nic_load(bp, LOAD_OPEN);
9737 }
9738
9739 /* called with rtnl_lock */
9740 static int bnx2x_close(struct net_device *dev)
9741 {
9742         struct bnx2x *bp = netdev_priv(dev);
9743
9744         /* Unload the driver, release IRQs */
9745         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9746         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9747                 if (!CHIP_REV_IS_SLOW(bp))
9748                         bnx2x_set_power_state(bp, PCI_D3hot);
9749
9750         return 0;
9751 }
9752
9753 /* called with netif_tx_lock from set_multicast */
9754 static void bnx2x_set_rx_mode(struct net_device *dev)
9755 {
9756         struct bnx2x *bp = netdev_priv(dev);
9757         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9758         int port = BP_PORT(bp);
9759
9760         if (bp->state != BNX2X_STATE_OPEN) {
9761                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9762                 return;
9763         }
9764
9765         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9766
9767         if (dev->flags & IFF_PROMISC)
9768                 rx_mode = BNX2X_RX_MODE_PROMISC;
9769
9770         else if ((dev->flags & IFF_ALLMULTI) ||
9771                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9772                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9773
9774         else { /* some multicasts */
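                /* E1 programs one CAM entry per multicast address, while
                 * E1H (below) uses a 256-bit hash filter instead
                 */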
9775                 if (CHIP_IS_E1(bp)) {
9776                         int i, old, offset;
9777                         struct dev_mc_list *mclist;
9778                         struct mac_configuration_cmd *config =
9779                                                 bnx2x_sp(bp, mcast_config);
9780
9781                         for (i = 0, mclist = dev->mc_list;
9782                              mclist && (i < dev->mc_count);
9783                              i++, mclist = mclist->next) {
9784
9785                                 config->config_table[i].
9786                                         cam_entry.msb_mac_addr =
9787                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9788                                 config->config_table[i].
9789                                         cam_entry.middle_mac_addr =
9790                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9791                                 config->config_table[i].
9792                                         cam_entry.lsb_mac_addr =
9793                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9794                                 config->config_table[i].cam_entry.flags =
9795                                                         cpu_to_le16(port);
9796                                 config->config_table[i].
9797                                         target_table_entry.flags = 0;
9798                                 config->config_table[i].
9799                                         target_table_entry.client_id = 0;
9800                                 config->config_table[i].
9801                                         target_table_entry.vlan_id = 0;
9802
9803                                 DP(NETIF_MSG_IFUP,
9804                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9805                                    config->config_table[i].
9806                                                 cam_entry.msb_mac_addr,
9807                                    config->config_table[i].
9808                                                 cam_entry.middle_mac_addr,
9809                                    config->config_table[i].
9810                                                 cam_entry.lsb_mac_addr);
9811                         }
9812                         old = config->hdr.length_6b;
9813                         if (old > i) {
9814                                 for (; i < old; i++) {
9815                                         if (CAM_IS_INVALID(config->
9816                                                            config_table[i])) {
9817                                                 i--; /* already invalidated */
9818                                                 break;
9819                                         }
9820                                         /* invalidate */
9821                                         CAM_INVALIDATE(config->
9822                                                        config_table[i]);
9823                                 }
9824                         }
9825
9826                         if (CHIP_REV_IS_SLOW(bp))
9827                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9828                         else
9829                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9830
9831                         config->hdr.length_6b = i;
9832                         config->hdr.offset = offset;
9833                         config->hdr.client_id = BP_CL_ID(bp);
9834                         config->hdr.reserved1 = 0;
9835
9836                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9837                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9838                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9839                                       0);
9840                 } else { /* E1H */
9841                         /* Accept one or more multicast addresses */
9842                         struct dev_mc_list *mclist;
9843                         u32 mc_filter[MC_HASH_SIZE];
9844                         u32 crc, bit, regidx;
9845                         int i;
9846
9847                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9848
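                        /* hash each address with CRC32c; the CRC's top
                         * byte selects one of 256 filter bits spread over
                         * the MC_HASH_SIZE registers
                         */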
9849                         for (i = 0, mclist = dev->mc_list;
9850                              mclist && (i < dev->mc_count);
9851                              i++, mclist = mclist->next) {
9852
9853                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9854                                    mclist->dmi_addr);
9855
9856                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9857                                 bit = (crc >> 24) & 0xff;
9858                                 regidx = bit >> 5;
9859                                 bit &= 0x1f;
9860                                 mc_filter[regidx] |= (1 << bit);
9861                         }
9862
9863                         for (i = 0; i < MC_HASH_SIZE; i++)
9864                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9865                                        mc_filter[i]);
9866                 }
9867         }
9868
9869         bp->rx_mode = rx_mode;
9870         bnx2x_set_storm_rx_mode(bp);
9871 }
9872
9873 /* called with rtnl_lock */
9874 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9875 {
9876         struct sockaddr *addr = p;
9877         struct bnx2x *bp = netdev_priv(dev);
9878
9879         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9880                 return -EINVAL;
9881
9882         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9883         if (netif_running(dev)) {
9884                 if (CHIP_IS_E1(bp))
9885                         bnx2x_set_mac_addr_e1(bp, 1);
9886                 else
9887                         bnx2x_set_mac_addr_e1h(bp, 1);
9888         }
9889
9890         return 0;
9891 }
9892
9893 /* called with rtnl_lock */
9894 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9895 {
9896         struct mii_ioctl_data *data = if_mii(ifr);
9897         struct bnx2x *bp = netdev_priv(dev);
9898         int port = BP_PORT(bp);
9899         int err;
9900
9901         switch (cmd) {
9902         case SIOCGMIIPHY:
9903                 data->phy_id = bp->port.phy_addr;
9904
9905                 /* fallthrough */
9906
9907         case SIOCGMIIREG: {
9908                 u16 mii_regval;
9909
9910                 if (!netif_running(dev))
9911                         return -EAGAIN;
9912
9913                 mutex_lock(&bp->port.phy_mutex);
9914                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9915                                       DEFAULT_PHY_DEV_ADDR,
9916                                       (data->reg_num & 0x1f), &mii_regval);
9917                 data->val_out = mii_regval;
9918                 mutex_unlock(&bp->port.phy_mutex);
9919                 return err;
9920         }
9921
9922         case SIOCSMIIREG:
9923                 if (!capable(CAP_NET_ADMIN))
9924                         return -EPERM;
9925
9926                 if (!netif_running(dev))
9927                         return -EAGAIN;
9928
9929                 mutex_lock(&bp->port.phy_mutex);
9930                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9931                                        DEFAULT_PHY_DEV_ADDR,
9932                                        (data->reg_num & 0x1f), data->val_in);
9933                 mutex_unlock(&bp->port.phy_mutex);
9934                 return err;
9935
9936         default:
9937                 /* do nothing */
9938                 break;
9939         }
9940
9941         return -EOPNOTSUPP;
9942 }
9943
9944 /* called with rtnl_lock */
9945 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9946 {
9947         struct bnx2x *bp = netdev_priv(dev);
9948         int rc = 0;
9949
9950         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9951             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9952                 return -EINVAL;
9953
9954         /* This does not race with packet allocation because
9955          * the actual alloc size is only updated as part of
9956          * load
9957          */
9958         dev->mtu = new_mtu;
9959
9960         if (netif_running(dev)) {
9961                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9962                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9963         }
9964
9965         return rc;
9966 }
9967
9968 static void bnx2x_tx_timeout(struct net_device *dev)
9969 {
9970         struct bnx2x *bp = netdev_priv(dev);
9971
9972 #ifdef BNX2X_STOP_ON_ERROR
9973         if (!bp->panic)
9974                 bnx2x_panic();
9975 #endif
9976         /* This allows the netif to be shut down gracefully before resetting */
9977         schedule_work(&bp->reset_task);
9978 }
9979
9980 #ifdef BCM_VLAN
9981 /* called with rtnl_lock */
9982 static void bnx2x_vlan_rx_register(struct net_device *dev,
9983                                    struct vlan_group *vlgrp)
9984 {
9985         struct bnx2x *bp = netdev_priv(dev);
9986
9987         bp->vlgrp = vlgrp;
9988         if (netif_running(dev))
9989                 bnx2x_set_client_config(bp);
9990 }
9991
9992 #endif
9993
9994 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9995 static void poll_bnx2x(struct net_device *dev)
9996 {
9997         struct bnx2x *bp = netdev_priv(dev);
9998
9999         disable_irq(bp->pdev->irq);
10000         bnx2x_interrupt(bp->pdev->irq, dev);
10001         enable_irq(bp->pdev->irq);
10002 }
10003 #endif
10004
10005 static const struct net_device_ops bnx2x_netdev_ops = {
10006         .ndo_open               = bnx2x_open,
10007         .ndo_stop               = bnx2x_close,
10008         .ndo_start_xmit         = bnx2x_start_xmit,
10009         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10010         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10011         .ndo_validate_addr      = eth_validate_addr,
10012         .ndo_do_ioctl           = bnx2x_ioctl,
10013         .ndo_change_mtu         = bnx2x_change_mtu,
10014         .ndo_tx_timeout         = bnx2x_tx_timeout,
10015 #ifdef BCM_VLAN
10016         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10017 #endif
10018 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10019         .ndo_poll_controller    = poll_bnx2x,
10020 #endif
10021 };
10022
10023
10024 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10025                                     struct net_device *dev)
10026 {
10027         struct bnx2x *bp;
10028         int rc;
10029
10030         SET_NETDEV_DEV(dev, &pdev->dev);
10031         bp = netdev_priv(dev);
10032
10033         bp->dev = dev;
10034         bp->pdev = pdev;
10035         bp->flags = 0;
10036         bp->func = PCI_FUNC(pdev->devfn);
10037
10038         rc = pci_enable_device(pdev);
10039         if (rc) {
10040                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10041                 goto err_out;
10042         }
10043
10044         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10045                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10046                        " aborting\n");
10047                 rc = -ENODEV;
10048                 goto err_out_disable;
10049         }
10050
10051         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10052                 printk(KERN_ERR PFX "Cannot find second PCI device"
10053                        " base address, aborting\n");
10054                 rc = -ENODEV;
10055                 goto err_out_disable;
10056         }
10057
10058         if (atomic_read(&pdev->enable_cnt) == 1) {
10059                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10060                 if (rc) {
10061                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10062                                " aborting\n");
10063                         goto err_out_disable;
10064                 }
10065
10066                 pci_set_master(pdev);
10067                 pci_save_state(pdev);
10068         }
10069
10070         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10071         if (bp->pm_cap == 0) {
10072                 printk(KERN_ERR PFX "Cannot find power management"
10073                        " capability, aborting\n");
10074                 rc = -EIO;
10075                 goto err_out_release;
10076         }
10077
10078         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10079         if (bp->pcie_cap == 0) {
10080                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10081                        " aborting\n");
10082                 rc = -EIO;
10083                 goto err_out_release;
10084         }
10085
10086         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10087                 bp->flags |= USING_DAC_FLAG;
10088                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10089                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10090                                " failed, aborting\n");
10091                         rc = -EIO;
10092                         goto err_out_release;
10093                 }
10094
10095         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10096                 printk(KERN_ERR PFX "System does not support DMA,"
10097                        " aborting\n");
10098                 rc = -EIO;
10099                 goto err_out_release;
10100         }
10101
10102         dev->mem_start = pci_resource_start(pdev, 0);
10103         dev->base_addr = dev->mem_start;
10104         dev->mem_end = pci_resource_end(pdev, 0);
10105
10106         dev->irq = pdev->irq;
10107
10108         bp->regview = pci_ioremap_bar(pdev, 0);
10109         if (!bp->regview) {
10110                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10111                 rc = -ENOMEM;
10112                 goto err_out_release;
10113         }
10114
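        /* BAR 2 exposes the doorbell space; map at most BNX2X_DB_SIZE of it */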
10115         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10116                                         min_t(u64, BNX2X_DB_SIZE,
10117                                               pci_resource_len(pdev, 2)));
10118         if (!bp->doorbells) {
10119                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10120                 rc = -ENOMEM;
10121                 goto err_out_unmap;
10122         }
10123
10124         bnx2x_set_power_state(bp, PCI_D0);
10125
10126         /* clean indirect addresses */
10127         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10128                                PCICFG_VENDOR_ID_OFFSET);
10129         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10130         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10131         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10132         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10133
10134         dev->watchdog_timeo = TX_TIMEOUT;
10135
10136         dev->netdev_ops = &bnx2x_netdev_ops;
10137         dev->ethtool_ops = &bnx2x_ethtool_ops;
10138         dev->features |= NETIF_F_SG;
10139         dev->features |= NETIF_F_HW_CSUM;
10140         if (bp->flags & USING_DAC_FLAG)
10141                 dev->features |= NETIF_F_HIGHDMA;
10142 #ifdef BCM_VLAN
10143         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10144 #endif
10145         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10146         dev->features |= NETIF_F_TSO6;
10147
10148         return 0;
10149
10150 err_out_unmap:
10151         if (bp->regview) {
10152                 iounmap(bp->regview);
10153                 bp->regview = NULL;
10154         }
10155         if (bp->doorbells) {
10156                 iounmap(bp->doorbells);
10157                 bp->doorbells = NULL;
10158         }
10159
10160 err_out_release:
10161         if (atomic_read(&pdev->enable_cnt) == 1)
10162                 pci_release_regions(pdev);
10163
10164 err_out_disable:
10165         pci_disable_device(pdev);
10166         pci_set_drvdata(pdev, NULL);
10167
10168 err_out:
10169         return rc;
10170 }
10171
10172 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10173 {
10174         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10175
10176         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10177         return val;
10178 }
10179
10180 /* returned value: 1=2.5GT/s  2=5GT/s */
10181 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10182 {
10183         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10184
10185         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10186         return val;
10187 }
10188
10189 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10190                                     const struct pci_device_id *ent)
10191 {
10192         static int version_printed;
10193         struct net_device *dev = NULL;
10194         struct bnx2x *bp;
10195         int rc;
10196
10197         if (version_printed++ == 0)
10198                 printk(KERN_INFO "%s", version);
10199
10200         /* dev zeroed in alloc_etherdev */
10201         dev = alloc_etherdev(sizeof(*bp));
10202         if (!dev) {
10203                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10204                 return -ENOMEM;
10205         }
10206
10207         bp = netdev_priv(dev);
10208         bp->msglevel = debug;
10209
10210         rc = bnx2x_init_dev(pdev, dev);
10211         if (rc < 0) {
10212                 free_netdev(dev);
10213                 return rc;
10214         }
10215
10216         rc = register_netdev(dev);
10217         if (rc) {
10218                 dev_err(&pdev->dev, "Cannot register net device\n");
10219                 goto init_one_exit;
10220         }
10221
10222         pci_set_drvdata(pdev, dev);
10223
10224         rc = bnx2x_init_bp(bp);
10225         if (rc) {
10226                 unregister_netdev(dev);
10227                 goto init_one_exit;
10228         }
10229
10230         netif_carrier_off(dev);
10231
10232         bp->common.name = board_info[ent->driver_data].name;
10233         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10234                " IRQ %d, ", dev->name, bp->common.name,
10235                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10236                bnx2x_get_pcie_width(bp),
10237                (bnx2x_get_pcie_speed(bp) == 2) ? "5GT/s (Gen2)" : "2.5GT/s",
10238                dev->base_addr, bp->pdev->irq);
10239         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10240         return 0;
10241
10242 init_one_exit:
10243         if (bp->regview)
10244                 iounmap(bp->regview);
10245
10246         if (bp->doorbells)
10247                 iounmap(bp->doorbells);
10248
10249         free_netdev(dev);
10250
10251         if (atomic_read(&pdev->enable_cnt) == 1)
10252                 pci_release_regions(pdev);
10253
10254         pci_disable_device(pdev);
10255         pci_set_drvdata(pdev, NULL);
10256
10257         return rc;
10258 }
10259
10260 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10261 {
10262         struct net_device *dev = pci_get_drvdata(pdev);
10263         struct bnx2x *bp;
10264
10265         if (!dev) {
10266                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10267                 return;
10268         }
10269         bp = netdev_priv(dev);
10270
10271         unregister_netdev(dev);
10272
10273         if (bp->regview)
10274                 iounmap(bp->regview);
10275
10276         if (bp->doorbells)
10277                 iounmap(bp->doorbells);
10278
10279         free_netdev(dev);
10280
10281         if (atomic_read(&pdev->enable_cnt) == 1)
10282                 pci_release_regions(pdev);
10283
10284         pci_disable_device(pdev);
10285         pci_set_drvdata(pdev, NULL);
10286 }
10287
10288 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10289 {
10290         struct net_device *dev = pci_get_drvdata(pdev);
10291         struct bnx2x *bp;
10292
10293         if (!dev) {
10294                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10295                 return -ENODEV;
10296         }
10297         bp = netdev_priv(dev);
10298
10299         rtnl_lock();
10300
10301         pci_save_state(pdev);
10302
10303         if (!netif_running(dev)) {
10304                 rtnl_unlock();
10305                 return 0;
10306         }
10307
10308         netif_device_detach(dev);
10309
10310         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10311
10312         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10313
10314         rtnl_unlock();
10315
10316         return 0;
10317 }
10318
10319 static int bnx2x_resume(struct pci_dev *pdev)
10320 {
10321         struct net_device *dev = pci_get_drvdata(pdev);
10322         struct bnx2x *bp;
10323         int rc;
10324
10325         if (!dev) {
10326                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10327                 return -ENODEV;
10328         }
10329         bp = netdev_priv(dev);
10330
10331         rtnl_lock();
10332
10333         pci_restore_state(pdev);
10334
10335         if (!netif_running(dev)) {
10336                 rtnl_unlock();
10337                 return 0;
10338         }
10339
10340         bnx2x_set_power_state(bp, PCI_D0);
10341         netif_device_attach(dev);
10342
10343         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10344
10345         rtnl_unlock();
10346
10347         return rc;
10348 }
10349
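/* Minimal teardown used on a fatal PCI (EEH) error: stop the datapath,
 * release IRQs and free resources without attempting a normal chip
 * shutdown, since the device may no longer be accessible.
 */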
10350 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10351 {
10352         int i;
10353
10354         bp->state = BNX2X_STATE_ERROR;
10355
10356         bp->rx_mode = BNX2X_RX_MODE_NONE;
10357
10358         bnx2x_netif_stop(bp, 0);
10359
10360         del_timer_sync(&bp->timer);
10361         bp->stats_state = STATS_STATE_DISABLED;
10362         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10363
10364         /* Release IRQs */
10365         bnx2x_free_irq(bp);
10366
10367         if (CHIP_IS_E1(bp)) {
10368                 struct mac_configuration_cmd *config =
10369                                                 bnx2x_sp(bp, mcast_config);
10370
10371                 for (i = 0; i < config->hdr.length_6b; i++)
10372                         CAM_INVALIDATE(config->config_table[i]);
10373         }
10374
10375         /* Free SKBs, SGEs, TPA pool and driver internals */
10376         bnx2x_free_skbs(bp);
10377         for_each_queue(bp, i)
10378                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10379         bnx2x_free_mem(bp);
10380
10381         bp->state = BNX2X_STATE_CLOSED;
10382
10383         netif_carrier_off(bp->dev);
10384
10385         return 0;
10386 }
10387
10388 static void bnx2x_eeh_recover(struct bnx2x *bp)
10389 {
10390         u32 val;
10391
10392         mutex_init(&bp->port.phy_mutex);
10393
10394         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10395         bp->link_params.shmem_base = bp->common.shmem_base;
10396         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10397
10398         if (!bp->common.shmem_base ||
10399             (bp->common.shmem_base < 0xA0000) ||
10400             (bp->common.shmem_base >= 0xC0000)) {
10401                 BNX2X_DEV_INFO("MCP not active\n");
10402                 bp->flags |= NO_MCP_FLAG;
10403                 return;
10404         }
10405
10406         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10407         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10408                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10409                 BNX2X_ERR("BAD MCP validity signature\n");
10410
10411         if (!BP_NOMCP(bp)) {
10412                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10413                               & DRV_MSG_SEQ_NUMBER_MASK);
10414                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10415         }
10416 }
10417
10418 /**
10419  * bnx2x_io_error_detected - called when PCI error is detected
10420  * @pdev: Pointer to PCI device
10421  * @state: The current PCI connection state
10422  *
10423  * This function is called after a PCI bus error affecting
10424  * this device has been detected.
10425  */
10426 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10427                                                 pci_channel_state_t state)
10428 {
10429         struct net_device *dev = pci_get_drvdata(pdev);
10430         struct bnx2x *bp = netdev_priv(dev);
10431
10432         rtnl_lock();
10433
10434         netif_device_detach(dev);
10435
10436         if (netif_running(dev))
10437                 bnx2x_eeh_nic_unload(bp);
10438
10439         pci_disable_device(pdev);
10440
10441         rtnl_unlock();
10442
10443         /* Request a slot reset */
10444         return PCI_ERS_RESULT_NEED_RESET;
10445 }
10446
10447 /**
10448  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10449  * @pdev: Pointer to PCI device
10450  *
10451  * Restart the card from scratch, as if from a cold boot.
10452  */
10453 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10454 {
10455         struct net_device *dev = pci_get_drvdata(pdev);
10456         struct bnx2x *bp = netdev_priv(dev);
10457
10458         rtnl_lock();
10459
10460         if (pci_enable_device(pdev)) {
10461                 dev_err(&pdev->dev,
10462                         "Cannot re-enable PCI device after reset\n");
10463                 rtnl_unlock();
10464                 return PCI_ERS_RESULT_DISCONNECT;
10465         }
10466
10467         pci_set_master(pdev);
10468         pci_restore_state(pdev);
10469
10470         if (netif_running(dev))
10471                 bnx2x_set_power_state(bp, PCI_D0);
10472
10473         rtnl_unlock();
10474
10475         return PCI_ERS_RESULT_RECOVERED;
10476 }
10477
10478 /**
10479  * bnx2x_io_resume - called when traffic can start flowing again
10480  * @pdev: Pointer to PCI device
10481  *
10482  * This callback is called when the error recovery driver tells us that
10483  * it's OK to resume normal operation.
10484  */
10485 static void bnx2x_io_resume(struct pci_dev *pdev)
10486 {
10487         struct net_device *dev = pci_get_drvdata(pdev);
10488         struct bnx2x *bp = netdev_priv(dev);
10489
10490         rtnl_lock();
10491
10492         bnx2x_eeh_recover(bp);
10493
10494         if (netif_running(dev))
10495                 bnx2x_nic_load(bp, LOAD_NORMAL);
10496
10497         netif_device_attach(dev);
10498
10499         rtnl_unlock();
10500 }
10501
10502 static struct pci_error_handlers bnx2x_err_handler = {
10503         .error_detected = bnx2x_io_error_detected,
10504         .slot_reset = bnx2x_io_slot_reset,
10505         .resume = bnx2x_io_resume,
10506 };
10507
10508 static struct pci_driver bnx2x_pci_driver = {
10509         .name        = DRV_MODULE_NAME,
10510         .id_table    = bnx2x_pci_tbl,
10511         .probe       = bnx2x_init_one,
10512         .remove      = __devexit_p(bnx2x_remove_one),
10513         .suspend     = bnx2x_suspend,
10514         .resume      = bnx2x_resume,
10515         .err_handler = &bnx2x_err_handler,
10516 };
10517
10518 static int __init bnx2x_init(void)
10519 {
10520         return pci_register_driver(&bnx2x_pci_driver);
10521 }
10522
10523 static void __exit bnx2x_cleanup(void)
10524 {
10525         pci_unregister_driver(&bnx2x_pci_driver);
10526 }
10527
10528 module_init(bnx2x_init);
10529 module_exit(bnx2x_cleanup);
10530