/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.26"
#define DRV_MODULE_RELDATE      "2009/01/26"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
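/*
 * Example (illustrative only, not part of the driver): loading the module
 * with TPA disabled and a debug msglevel set. The value 0x1 below is an
 * arbitrary example mask, not a recommended setting:
 *
 *      modprobe bnx2x disable_tpa=1 debug=0x1
 */
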
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

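/*
 * The two helpers above tunnel register accesses through the PCI config
 * space GRC window: the target GRC address is written to PCICFG_GRC_ADDRESS
 * and the data then moves through PCICFG_GRC_DATA. Restoring the window to
 * PCICFG_VENDOR_ID_OFFSET afterwards keeps a stray config cycle from
 * hitting an arbitrary register. A sketch of a paired use (illustrative
 * only, assuming an init-time, MCP-serialized context):
 *
 *      bnx2x_reg_wr_ind(bp, addr, val);
 *      if (bnx2x_reg_rd_ind(bp, addr) != val)
 *              BNX2X_ERR("indirect write-back mismatch\n");
 */
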
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

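/*
 * Both DMAE routines above follow the same pattern: build a dmae_command in
 * the slowpath area, post it to the command mailbox via bnx2x_post_dmae()
 * and then poll wb_comp until the engine writes DMAE_COMP_VAL back. A
 * minimal sketch of reading two GRC dwords into the slowpath scratch buffer
 * (illustrative only; NIG_REG_STAT0_BRB_DISCARD is just an example of a
 * wide register normally read this way):
 *
 *      u32 lo, hi;
 *
 *      bnx2x_read_dmae(bp, NIG_REG_STAT0_BRB_DISCARD, 2);
 *      lo = bp->slowpath->wb_data[0];
 *      hi = bp->slowpath->wb_data[1];
 */
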
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_CONT "\n");
        printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

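/*
 * bnx2x_ack_sb() above packs the acknowledgement into a single
 * igu_ack_register and issues it as one 32-bit write: the status block id,
 * the storm id, the "update index" flag and the interrupt mode all share
 * sb_id_and_flags, while status_block_index carries the consumer value
 * being acked. Keeping it to a single register write makes the ack atomic
 * with respect to the IGU.
 */
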
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

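/*
 * The BD chain walked by bnx2x_free_tx_pkt() above looks like this for a
 * checksummed/TSO packet (only the first BD and the frag BDs carry DMA
 * mappings, which is why the parse BD and the TSO split header BD are
 * skipped without an unmap):
 *
 *      first BD -> parse BD -> [TSO split header BD] -> frag BD ... frag BD
 */
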
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

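/*
 * Worked example for bnx2x_tx_avail() (numbers are illustrative only):
 * with tx_ring_size == 4096, prod == 100, cons == 40 and, say,
 * NUM_TX_RINGS == 16 "next-page" entries,
 *
 *      used  = (100 - 40) + 16 = 76
 *      avail = 4096 - 76       = 4020 usable BDs
 *
 * Counting the next-page entries as permanently "used" keeps them from
 * ever being handed out as data BDs.
 */
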
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

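/*
 * The SGE mask logic above treats fp->sge_mask as a bitmap with one bit per
 * SGE ring entry, initialized to all ones. Bits are cleared as the firmware
 * reports completed entries, and the producer may only advance over 64-bit
 * mask words that have gone fully to zero (each is then re-armed to all
 * ones). The two bits per page that map to "next page" pointer entries are
 * kept cleared by bnx2x_clear_sge_mask_next_elems() so they never stall the
 * producer.
 */
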
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

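/*
 * TPA bin lifecycle: bnx2x_tpa_start() above parks the partially filled skb
 * from the RX ring in tpa_pool[queue] and marks the bin BNX2X_TPA_START;
 * subsequent completions append page frags via bnx2x_fill_frag_skb(); the
 * matching TPA_END completion is handled by bnx2x_tpa_stop() below, which
 * hands the aggregated skb to the stack and returns the bin to
 * BNX2X_TPA_STOP.
 */
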
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }
                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb_record_rx_queue(skb, queue);

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                u16 queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
1499                                                 BNX2X_ERR("STOP on non-TCP "
1500                                                           "data\n");
1501
1502                                         /* This is the size of the linear
1503                                            data on this skb */
1504                                         len = le16_to_cpu(cqe->fast_path_cqe.
1505                                                                 len_on_bd);
1506                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1507                                                     len, cqe, comp_ring_cons);
1508 #ifdef BNX2X_STOP_ON_ERROR
1509                                         if (bp->panic)
1510                                                 return -EINVAL;
1511 #endif
1512
1513                                         bnx2x_update_sge_prod(fp,
1514                                                         &cqe->fast_path_cqe);
1515                                         goto next_cqe;
1516                                 }
1517                         }
1518
1519                         pci_dma_sync_single_for_device(bp->pdev,
1520                                         pci_unmap_addr(rx_buf, mapping),
1521                                         pad + RX_COPY_THRESH,
1522                                         PCI_DMA_FROMDEVICE);
1523                         prefetch(skb);
1524                         prefetch(((char *)(skb)) + 128);
1525
1526                         /* is this an error packet? */
1527                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1528                                 DP(NETIF_MSG_RX_ERR,
1529                                    "ERROR  flags %x  rx packet %u\n",
1530                                    cqe_fp_flags, sw_comp_cons);
1531                                 bp->eth_stats.rx_err_discard_pkt++;
1532                                 goto reuse_rx;
1533                         }
1534
1535                         /* Since we don't have a jumbo ring,
1536                          * copy small packets if mtu > 1500
1537                          */
1538                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1539                             (len <= RX_COPY_THRESH)) {
1540                                 struct sk_buff *new_skb;
1541
1542                                 new_skb = netdev_alloc_skb(bp->dev,
1543                                                            len + pad);
1544                                 if (new_skb == NULL) {
1545                                         DP(NETIF_MSG_RX_ERR,
1546                                            "ERROR  packet dropped "
1547                                            "because of alloc failure\n");
1548                                         bp->eth_stats.rx_skb_alloc_failed++;
1549                                         goto reuse_rx;
1550                                 }
1551
1552                                 /* aligned copy */
1553                                 skb_copy_from_linear_data_offset(skb, pad,
1554                                                     new_skb->data + pad, len);
1555                                 skb_reserve(new_skb, pad);
1556                                 skb_put(new_skb, len);
1557
1558                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1559
1560                                 skb = new_skb;
1561
1562                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1563                                 pci_unmap_single(bp->pdev,
1564                                         pci_unmap_addr(rx_buf, mapping),
1565                                         bp->rx_buf_size,
1566                                         PCI_DMA_FROMDEVICE);
1567                                 skb_reserve(skb, pad);
1568                                 skb_put(skb, len);
1569
1570                         } else {
1571                                 DP(NETIF_MSG_RX_ERR,
1572                                    "ERROR  packet dropped because "
1573                                    "of alloc failure\n");
1574                                 bp->eth_stats.rx_skb_alloc_failed++;
1575 reuse_rx:
1576                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1577                                 goto next_rx;
1578                         }
1579
1580                         skb->protocol = eth_type_trans(skb, bp->dev);
1581
1582                         skb->ip_summed = CHECKSUM_NONE;
1583                         if (bp->rx_csum) {
1584                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1585                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1586                                 else
1587                                         bp->eth_stats.hw_csum_err++;
1588                         }
1589                 }
1590
1591 #ifdef BCM_VLAN
1592                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1593                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1594                      PARSING_FLAGS_VLAN))
1595                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1596                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1597                 else
1598 #endif
1599                         netif_receive_skb(skb);
1600
1601
1602 next_rx:
1603                 rx_buf->skb = NULL;
1604
1605                 bd_cons = NEXT_RX_IDX(bd_cons);
1606                 bd_prod = NEXT_RX_IDX(bd_prod);
1607                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1608                 rx_pkt++;
1609 next_cqe:
1610                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1611                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1612
1613                 if (rx_pkt == budget)
1614                         break;
1615         } /* while */
1616
1617         fp->rx_bd_cons = bd_cons;
1618         fp->rx_bd_prod = bd_prod_fw;
1619         fp->rx_comp_cons = sw_comp_cons;
1620         fp->rx_comp_prod = sw_comp_prod;
1621
1622         /* Update producers */
1623         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1624                              fp->rx_sge_prod);
1625
1626         fp->rx_pkt += rx_pkt;
1627         fp->rx_calls++;
1628
1629         return rx_pkt;
1630 }
1631
1632 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1633 {
1634         struct bnx2x_fastpath *fp = fp_cookie;
1635         struct bnx2x *bp = fp->bp;
1636         int index = FP_IDX(fp);
1637
1638         /* Return here if interrupt is disabled */
1639         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1640                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1641                 return IRQ_HANDLED;
1642         }
1643
1644         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1645            index, FP_SB_ID(fp));
1646         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1647
1648 #ifdef BNX2X_STOP_ON_ERROR
1649         if (unlikely(bp->panic))
1650                 return IRQ_HANDLED;
1651 #endif
1652
1653         prefetch(fp->rx_cons_sb);
1654         prefetch(fp->tx_cons_sb);
1655         prefetch(&fp->status_blk->c_status_block.status_block_index);
1656         prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658         napi_schedule(&bnx2x_fp(bp, index, napi));
1659
1660         return IRQ_HANDLED;
1661 }
1662
1663 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1664 {
1665         struct net_device *dev = dev_instance;
1666         struct bnx2x *bp = netdev_priv(dev);
1667         u16 status = bnx2x_ack_int(bp);
1668         u16 mask;
1669
1670         /* Return here if interrupt is shared and it's not for us */
1671         if (unlikely(status == 0)) {
1672                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1673                 return IRQ_NONE;
1674         }
1675         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1676
1677         /* Return here if interrupt is disabled */
1678         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1679                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1680                 return IRQ_HANDLED;
1681         }
1682
1683 #ifdef BNX2X_STOP_ON_ERROR
1684         if (unlikely(bp->panic))
1685                 return IRQ_HANDLED;
1686 #endif
1687
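             /* in the status word read from the IGU, bit 0 flags the default
                (slowpath) status block and bit (sb_id + 1) flags a fastpath
                status block, hence the 0x2 << sb_id mask */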
1688         mask = 0x2 << bp->fp[0].sb_id;
1689         if (status & mask) {
1690                 struct bnx2x_fastpath *fp = &bp->fp[0];
1691
1692                 prefetch(fp->rx_cons_sb);
1693                 prefetch(fp->tx_cons_sb);
1694                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1695                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1696
1697                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1698
1699                 status &= ~mask;
1700         }
1701
1702
1703         if (unlikely(status & 0x1)) {
1704                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1705
1706                 status &= ~0x1;
1707                 if (!status)
1708                         return IRQ_HANDLED;
1709         }
1710
1711         if (status)
1712                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1713                    status);
1714
1715         return IRQ_HANDLED;
1716 }
1717
1718 /* end of fast path */
1719
1720 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1721
1722 /* Link */
1723
1724 /*
1725  * General service functions
1726  */
1727
1728 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1729 {
1730         u32 lock_status;
1731         u32 resource_bit = (1 << resource);
1732         int func = BP_FUNC(bp);
1733         u32 hw_lock_control_reg;
1734         int cnt;
1735
1736         /* Validating that the resource is within range */
1737         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1738                 DP(NETIF_MSG_HW,
1739                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1740                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1741                 return -EINVAL;
1742         }
1743
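             /* functions 0-5 use DRIVER_CONTROL_1..6 and functions 6-7 use
                DRIVER_CONTROL_7 and up (the lock registers are spaced 8 bytes
                apart; writing the resource bit at offset +4 requests the lock
                and writing it at offset 0 releases it) */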
1744         if (func <= 5) {
1745                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1746         } else {
1747                 hw_lock_control_reg =
1748                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1749         }
1750
1751         /* Validating that the resource is not already taken */
1752         lock_status = REG_RD(bp, hw_lock_control_reg);
1753         if (lock_status & resource_bit) {
1754                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1755                    lock_status, resource_bit);
1756                 return -EEXIST;
1757         }
1758
1759         /* Poll every 5ms, for up to 5 seconds in total */
1760         for (cnt = 0; cnt < 1000; cnt++) {
1761                 /* Try to acquire the lock */
1762                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1763                 lock_status = REG_RD(bp, hw_lock_control_reg);
1764                 if (lock_status & resource_bit)
1765                         return 0;
1766
1767                 msleep(5);
1768         }
1769         DP(NETIF_MSG_HW, "Timeout\n");
1770         return -EAGAIN;
1771 }
1772
1773 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1774 {
1775         u32 lock_status;
1776         u32 resource_bit = (1 << resource);
1777         int func = BP_FUNC(bp);
1778         u32 hw_lock_control_reg;
1779
1780         /* Validating that the resource is within range */
1781         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1782                 DP(NETIF_MSG_HW,
1783                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1784                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1785                 return -EINVAL;
1786         }
1787
1788         if (func <= 5) {
1789                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1790         } else {
1791                 hw_lock_control_reg =
1792                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1793         }
1794
1795         /* Validating that the resource is currently taken */
1796         lock_status = REG_RD(bp, hw_lock_control_reg);
1797         if (!(lock_status & resource_bit)) {
1798                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1799                    lock_status, resource_bit);
1800                 return -EFAULT;
1801         }
1802
1803         REG_WR(bp, hw_lock_control_reg, resource_bit);
1804         return 0;
1805 }
1806
1807 /* HW Lock for shared dual port PHYs */
1808 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1809 {
1810         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1811
1812         mutex_lock(&bp->port.phy_mutex);
1813
1814         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1815             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1816                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1817 }
1818
1819 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1820 {
1821         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1822
1823         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1824             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1825                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1826
1827         mutex_unlock(&bp->port.phy_mutex);
1828 }
1829
1830 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1831 {
1832         /* The GPIO should be swapped if swap register is set and active */
1833         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1834                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1835         int gpio_shift = gpio_num +
1836                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1837         u32 gpio_mask = (1 << gpio_shift);
1838         u32 gpio_reg;
1839
1840         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1841                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1842                 return -EINVAL;
1843         }
1844
1845         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1846         /* read GPIO and mask out everything except the float bits */
1847         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1848
1849         switch (mode) {
1850         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1851                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1852                    gpio_num, gpio_shift);
1853                 /* clear FLOAT and set CLR */
1854                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1856                 break;
1857
1858         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1859                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1860                    gpio_num, gpio_shift);
1861                 /* clear FLOAT and set SET */
1862                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1863                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1864                 break;
1865
1866         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1867                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1868                    gpio_num, gpio_shift);
1869                 /* set FLOAT */
1870                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1871                 break;
1872
1873         default:
1874                 break;
1875         }
1876
1877         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1878         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1879
1880         return 0;
1881 }
1882
1883 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1884 {
1885         u32 spio_mask = (1 << spio_num);
1886         u32 spio_reg;
1887
1888         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1889             (spio_num > MISC_REGISTERS_SPIO_7)) {
1890                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1891                 return -EINVAL;
1892         }
1893
1894         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1895         /* read SPIO and mask out everything except the float bits */
1896         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1897
1898         switch (mode) {
1899         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1900                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1901                 /* clear FLOAT and set CLR */
1902                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1903                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1904                 break;
1905
1906         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1907                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1908                 /* clear FLOAT and set SET */
1909                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1910                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1911                 break;
1912
1913         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1914                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1915                 /* set FLOAT */
1916                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1917                 break;
1918
1919         default:
1920                 break;
1921         }
1922
1923         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1924         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1925
1926         return 0;
1927 }
1928
1929 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1930 {
1931         switch (bp->link_vars.ieee_fc &
1932                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1933         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1934                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1935                                           ADVERTISED_Pause);
1936                 break;
1937         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1938                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1939                                          ADVERTISED_Pause);
1940                 break;
1941         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1942                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1943                 break;
1944         default:
1945                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1946                                           ADVERTISED_Pause);
1947                 break;
1948         }
1949 }
1950
1951 static void bnx2x_link_report(struct bnx2x *bp)
1952 {
1953         if (bp->link_vars.link_up) {
1954                 if (bp->state == BNX2X_STATE_OPEN)
1955                         netif_carrier_on(bp->dev);
1956                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1957
1958                 printk("%d Mbps ", bp->link_vars.line_speed);
1959
1960                 if (bp->link_vars.duplex == DUPLEX_FULL)
1961                         printk("full duplex");
1962                 else
1963                         printk("half duplex");
1964
1965                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1966                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1967                                 printk(", receive ");
1968                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1969                                         printk("& transmit ");
1970                         } else {
1971                                 printk(", transmit ");
1972                         }
1973                         printk("flow control ON");
1974                 }
1975                 printk("\n");
1976
1977         } else { /* link_down */
1978                 netif_carrier_off(bp->dev);
1979                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1980         }
1981 }
1982
1983 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1984 {
1985         if (!BP_NOMCP(bp)) {
1986                 u8 rc;
1987
1988                 /* Initialize link parameters structure variables */
1989                 /* It is recommended to turn off RX FC for jumbo frames
1990                    for better performance */
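                     /* BNX2X_FLOW_CTRL_TX advertises TX-only pause, i.e. it
                        leaves RX flow control off */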
1991                 if (IS_E1HMF(bp))
1992                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1993                 else if (bp->dev->mtu > 5000)
1994                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1995                 else
1996                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1997
1998                 bnx2x_acquire_phy_lock(bp);
1999                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2000                 bnx2x_release_phy_lock(bp);
2001
2002                 bnx2x_calc_fc_adv(bp);
2003
2004                 if (bp->link_vars.link_up)
2005                         bnx2x_link_report(bp);
2006
2007
2008                 return rc;
2009         }
2010         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2011         return -EINVAL;
2012 }
2013
2014 static void bnx2x_link_set(struct bnx2x *bp)
2015 {
2016         if (!BP_NOMCP(bp)) {
2017                 bnx2x_acquire_phy_lock(bp);
2018                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2019                 bnx2x_release_phy_lock(bp);
2020
2021                 bnx2x_calc_fc_adv(bp);
2022         } else
2023                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2024 }
2025
2026 static void bnx2x__link_reset(struct bnx2x *bp)
2027 {
2028         if (!BP_NOMCP(bp)) {
2029                 bnx2x_acquire_phy_lock(bp);
2030                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2031                 bnx2x_release_phy_lock(bp);
2032         } else
2033                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2034 }
2035
2036 static u8 bnx2x_link_test(struct bnx2x *bp)
2037 {
2038         u8 rc;
2039
2040         bnx2x_acquire_phy_lock(bp);
2041         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2042         bnx2x_release_phy_lock(bp);
2043
2044         return rc;
2045 }
2046
2047 /* Calculates the sum of vn_min_rates.
2048    It's needed for further normalization of the min_rates.
2049
2050    Returns:
2051      sum of vn_min_rates
2052        or
2053      0 - if all the min_rates are 0.
2054      In the latter case the fairness algorithm should be deactivated.
2055      If not all min_rates are zero, then those that are zero will
2056      be set to 1.
2057  */
2058 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2059 {
2060         int i, port = BP_PORT(bp);
2061         u32 wsum = 0;
2062         int all_zero = 1;
2063
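             /* the absolute function number interleaves vn and port:
                func = 2*vn + port */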
2064         for (i = 0; i < E1HVN_MAX; i++) {
2065                 u32 vn_cfg =
2066                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2067                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2068                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2069                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2070                         /* If min rate is zero - set it to 1 */
2071                         if (!vn_min_rate)
2072                                 vn_min_rate = DEF_MIN_RATE;
2073                         else
2074                                 all_zero = 0;
2075
2076                         wsum += vn_min_rate;
2077                 }
2078         }
2079
2080         /* ... only if all min rates are zeros - disable FAIRNESS */
2081         if (all_zero)
2082                 return 0;
2083
2084         return wsum;
2085 }
2086
2087 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2088                                    int en_fness,
2089                                    u16 port_rate,
2090                                    struct cmng_struct_per_port *m_cmng_port)
2091 {
2092         u32 r_param = port_rate / 8;
2093         int port = BP_PORT(bp);
2094         int i;
2095
2096         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2097
2098         /* Enable minmax only if we are in e1hmf mode */
2099         if (IS_E1HMF(bp)) {
2100                 u32 fair_periodic_timeout_usec;
2101                 u32 t_fair;
2102
2103                 /* Enable rate shaping and fairness */
2104                 m_cmng_port->flags.cmng_vn_enable = 1;
2105                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2106                 m_cmng_port->flags.rate_shaping_enable = 1;
2107
2108                 if (!en_fness)
2109                         DP(NETIF_MSG_IFUP, "All MIN values are zero"
2110                            "  fairness will be disabled\n");
2111
2112                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2113                 m_cmng_port->rs_vars.rs_periodic_timeout =
2114                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2115
2116                 /* this is the threshold below which no timer arming occurs.
2117                    The 1.25 coefficient makes the threshold a little bigger
2118                    than the real time, to compensate for timer inaccuracy */
2119                 m_cmng_port->rs_vars.rs_threshold =
2120                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2121
2122                 /* resolution of fairness timer */
2123                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2124                 /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2125                 t_fair = T_FAIR_COEF / port_rate;
2126
2127                 /* this is the threshold below which we won't arm
2128                    the timer anymore */
2129                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2130
2131                 /* we multiply by 1e3/8 to get bytes/msec.
2132                    We don't want the credits to exceed
2133                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2134                 m_cmng_port->fair_vars.upper_bound =
2135                                                 r_param * t_fair * FAIR_MEM;
2136                 /* since each tick is 4 usec */
2137                 m_cmng_port->fair_vars.fairness_timeout =
2138                                                 fair_periodic_timeout_usec / 4;
2139
2140         } else {
2141                 /* Disable rate shaping and fairness */
2142                 m_cmng_port->flags.cmng_vn_enable = 0;
2143                 m_cmng_port->flags.fairness_enable = 0;
2144                 m_cmng_port->flags.rate_shaping_enable = 0;
2145
2146                 DP(NETIF_MSG_IFUP,
2147                    "Single function mode  minmax will be disabled\n");
2148         }
2149
2150         /* Store it to internal memory */
2151         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2152                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2153                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2154                        ((u32 *)(m_cmng_port))[i]);
2155 }
2156
2157 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2158                                  u32 wsum, u16 port_rate,
2159                                  struct cmng_struct_per_port *m_cmng_port)
2160 {
2161         struct rate_shaping_vars_per_vn m_rs_vn;
2162         struct fairness_vars_per_vn m_fair_vn;
2163         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2164         u16 vn_min_rate, vn_max_rate;
2165         int i;
2166
2167         /* If function is hidden - set min and max to zeroes */
2168         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2169                 vn_min_rate = 0;
2170                 vn_max_rate = 0;
2171
2172         } else {
2173                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2174                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2175                 /* If FAIRNESS is enabled (not all min rates are zero) and
2176                    the current min rate is zero, set it to 1.
2177                    This is a requirement of the algorithm. */
2178                 if ((vn_min_rate == 0) && wsum)
2179                         vn_min_rate = DEF_MIN_RATE;
2180                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2181                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2182         }
2183
2184         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2185            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2186
2187         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2188         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2189
2190         /* global vn counter - maximal Mbps for this vn */
2191         m_rs_vn.vn_counter.rate = vn_max_rate;
2192
2193         /* quota - number of bytes transmitted in this period */
2194         m_rs_vn.vn_counter.quota =
2195                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
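             /* rate is in Mbps, i.e. bits per usec, so
                rate * usec / 8 yields bytes per period */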
2196
2197 #ifdef BNX2X_PER_PROT_QOS
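         /* note: this block appears to be compiled out (BNX2X_PER_PROT_QOS
            is not defined in this file) and is kept for reference only */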
2198         /* per protocol counter */
2199         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2200                 /* maximal Mbps for this protocol */
2201                 m_rs_vn.protocol_counters[protocol].rate =
2202                                                 protocol_max_rate[protocol];
2203                 /* the quota in each timer period -
2204                    number of bytes transmitted in this period */
2205                 m_rs_vn.protocol_counters[protocol].quota =
2206                         (u32)(rs_periodic_timeout_usec *
2207                           ((double)m_rs_vn.
2208                                    protocol_counters[protocol].rate/8));
2209         }
2210 #endif
2211
2212         if (wsum) {
2213                 /* credit for each period of the fairness algorithm:
2214                    number of bytes in T_FAIR (the vns share the port rate).
2215                    wsum should not be larger than 10000, thus
2216                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2217                 m_fair_vn.vn_credit_delta =
2218                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2219                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2220                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2221                    m_fair_vn.vn_credit_delta);
2222         }
2223
2224 #ifdef BNX2X_PER_PROT_QOS
2225         do {
2226                 u32 protocolWeightSum = 0;
2227
2228                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2229                         protocolWeightSum +=
2230                                         drvInit.protocol_min_rate[protocol];
2231                 /* per protocol counter -
2232                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2233                 if (protocolWeightSum > 0) {
2234                         for (protocol = 0;
2235                              protocol < NUM_OF_PROTOCOLS; protocol++)
2236                                 /* credit for each period of the
2237                                    fairness algorithm - number of bytes in
2238                                    T_FAIR (the protocol share the vn rate) */
2239                                 m_fair_vn.protocol_credit_delta[protocol] =
2240                                         (u32)((vn_min_rate / 8) * t_fair *
2241                                         protocol_min_rate / protocolWeightSum);
2242                 }
2243         } while (0);
2244 #endif
2245
2246         /* Store it to internal memory */
2247         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2248                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2249                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2250                        ((u32 *)(&m_rs_vn))[i]);
2251
2252         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2253                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2254                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2255                        ((u32 *)(&m_fair_vn))[i]);
2256 }
2257
2258 /* This function is called upon link interrupt */
2259 static void bnx2x_link_attn(struct bnx2x *bp)
2260 {
2261         int vn;
2262
2263         /* Make sure that we are synced with the current statistics */
2264         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2265
2266         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2267
2268         if (bp->link_vars.link_up) {
2269
2270                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2271                         struct host_port_stats *pstats;
2272
2273                         pstats = bnx2x_sp(bp, port_stats);
2274                         /* reset old bmac stats */
2275                         memset(&(pstats->mac_stx[0]), 0,
2276                                sizeof(struct mac_stx));
2277                 }
2278                 if ((bp->state == BNX2X_STATE_OPEN) ||
2279                     (bp->state == BNX2X_STATE_DISABLED))
2280                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2281         }
2282
2283         /* indicate link status */
2284         bnx2x_link_report(bp);
2285
2286         if (IS_E1HMF(bp)) {
2287                 int func;
2288
2289                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2290                         if (vn == BP_E1HVN(bp))
2291                                 continue;
2292
2293                         func = ((vn << 1) | BP_PORT(bp));
2294
2295                         /* Set the attention towards other drivers
2296                            on the same port */
2297                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2298                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2299                 }
2300         }
2301
2302         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2303                 struct cmng_struct_per_port m_cmng_port;
2304                 u32 wsum;
2305                 int port = BP_PORT(bp);
2306
2307                 /* Init RATE SHAPING and FAIRNESS contexts */
2308                 wsum = bnx2x_calc_vn_wsum(bp);
2309                 bnx2x_init_port_minmax(bp, (int)wsum,
2310                                         bp->link_vars.line_speed,
2311                                         &m_cmng_port);
2312                 if (IS_E1HMF(bp))
2313                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2314                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2315                                         wsum, bp->link_vars.line_speed,
2316                                         &m_cmng_port);
2317         }
2318 }
2319
2320 static void bnx2x__link_status_update(struct bnx2x *bp)
2321 {
2322         if (bp->state != BNX2X_STATE_OPEN)
2323                 return;
2324
2325         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2326
2327         if (bp->link_vars.link_up)
2328                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2329         else
2330                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2331
2332         /* indicate link status */
2333         bnx2x_link_report(bp);
2334 }
2335
2336 static void bnx2x_pmf_update(struct bnx2x *bp)
2337 {
2338         int port = BP_PORT(bp);
2339         u32 val;
2340
2341         bp->port.pmf = 1;
2342         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2343
2344         /* enable nig attention */
2345         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2346         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2347         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2348
2349         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2350 }
2351
2352 /* end of Link */
2353
2354 /* slow path */
2355
2356 /*
2357  * General service functions
2358  */
2359
2360 /* the slow path queue is odd since completions arrive on the fastpath ring */
2361 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2362                          u32 data_hi, u32 data_lo, int common)
2363 {
2364         int func = BP_FUNC(bp);
2365
2366         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2367            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2368            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2369            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2370            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2371
2372 #ifdef BNX2X_STOP_ON_ERROR
2373         if (unlikely(bp->panic))
2374                 return -EIO;
2375 #endif
2376
2377         spin_lock_bh(&bp->spq_lock);
2378
2379         if (!bp->spq_left) {
2380                 BNX2X_ERR("BUG! SPQ ring full!\n");
2381                 spin_unlock_bh(&bp->spq_lock);
2382                 bnx2x_panic();
2383                 return -EBUSY;
2384         }
2385
2386         /* CID needs the port number to be encoded in it */
2387         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2388                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2389                                      HW_CID(bp, cid)));
2390         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2391         if (common)
2392                 bp->spq_prod_bd->hdr.type |=
2393                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2394
2395         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2396         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2397
2398         bp->spq_left--;
2399
2400         if (bp->spq_prod_bd == bp->spq_last_bd) {
2401                 bp->spq_prod_bd = bp->spq;
2402                 bp->spq_prod_idx = 0;
2403                 DP(NETIF_MSG_TIMER, "end of spq\n");
2404
2405         } else {
2406                 bp->spq_prod_bd++;
2407                 bp->spq_prod_idx++;
2408         }
2409
2410         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2411                bp->spq_prod_idx);
2412
2413         spin_unlock_bh(&bp->spq_lock);
2414         return 0;
2415 }
2416
2417 /* acquire split MCP access lock register */
2418 static int bnx2x_acquire_alr(struct bnx2x *bp)
2419 {
2420         u32 i, j, val;
2421         int rc = 0;
2422
2423         might_sleep();
2424         i = 100;
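             /* request the split MCP access lock by setting bit 31; the lock
                is held once the bit reads back as set */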
2425         for (j = 0; j < i*10; j++) {
2426                 val = (1UL << 31);
2427                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2428                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2429                 if (val & (1L << 31))
2430                         break;
2431
2432                 msleep(5);
2433         }
2434         if (!(val & (1L << 31))) {
2435                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2436                 rc = -EBUSY;
2437         }
2438
2439         return rc;
2440 }
2441
2442 /* release split MCP access lock register */
2443 static void bnx2x_release_alr(struct bnx2x *bp)
2444 {
2445         u32 val = 0;
2446
2447         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2448 }
2449
2450 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2451 {
2452         struct host_def_status_block *def_sb = bp->def_status_blk;
2453         u16 rc = 0;
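             /* the return value is a bitmask of which indices advanced:
                1 - attention bits, 2/4/8/16 - the c/u/x/t default status
                block indices respectively */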
2454
2455         barrier(); /* status block is written to by the chip */
2456         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2457                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2458                 rc |= 1;
2459         }
2460         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2461                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2462                 rc |= 2;
2463         }
2464         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2465                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2466                 rc |= 4;
2467         }
2468         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2469                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2470                 rc |= 8;
2471         }
2472         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2473                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2474                 rc |= 16;
2475         }
2476         return rc;
2477 }
2478
2479 /*
2480  * slow path service functions
2481  */
2482
2483 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2484 {
2485         int port = BP_PORT(bp);
2486         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2487                        COMMAND_REG_ATTN_BITS_SET);
2488         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2489                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2490         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2491                                        NIG_REG_MASK_INTERRUPT_PORT0;
2492         u32 aeu_mask;
2493
2494         if (bp->attn_state & asserted)
2495                 BNX2X_ERR("IGU ERROR\n");
2496
2497         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2498         aeu_mask = REG_RD(bp, aeu_addr);
2499
2500         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2501            aeu_mask, asserted);
2502         aeu_mask &= ~(asserted & 0xff);
2503         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2504
2505         REG_WR(bp, aeu_addr, aeu_mask);
2506         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2507
2508         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2509         bp->attn_state |= asserted;
2510         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2511
2512         if (asserted & ATTN_HARD_WIRED_MASK) {
2513                 if (asserted & ATTN_NIG_FOR_FUNC) {
2514
2515                         bnx2x_acquire_phy_lock(bp);
2516
2517                         /* save nig interrupt mask */
2518                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2519                         REG_WR(bp, nig_int_mask_addr, 0);
2520
2521                         bnx2x_link_attn(bp);
2522
2523                         /* handle unicore attn? */
2524                 }
2525                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2526                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2527
2528                 if (asserted & GPIO_2_FUNC)
2529                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2530
2531                 if (asserted & GPIO_3_FUNC)
2532                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2533
2534                 if (asserted & GPIO_4_FUNC)
2535                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2536
2537                 if (port == 0) {
2538                         if (asserted & ATTN_GENERAL_ATTN_1) {
2539                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2540                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2541                         }
2542                         if (asserted & ATTN_GENERAL_ATTN_2) {
2543                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2544                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2545                         }
2546                         if (asserted & ATTN_GENERAL_ATTN_3) {
2547                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2548                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2549                         }
2550                 } else {
2551                         if (asserted & ATTN_GENERAL_ATTN_4) {
2552                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2553                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2554                         }
2555                         if (asserted & ATTN_GENERAL_ATTN_5) {
2556                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2557                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2558                         }
2559                         if (asserted & ATTN_GENERAL_ATTN_6) {
2560                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2561                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2562                         }
2563                 }
2564
2565         } /* if hardwired */
2566
2567         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2568            asserted, hc_addr);
2569         REG_WR(bp, hc_addr, asserted);
2570
2571         /* now set back the mask */
2572         if (asserted & ATTN_NIG_FOR_FUNC) {
2573                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2574                 bnx2x_release_phy_lock(bp);
2575         }
2576 }
2577
2578 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2579 {
2580         int port = BP_PORT(bp);
2581         int reg_offset;
2582         u32 val;
2583
2584         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2585                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2586
2587         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2588
2589                 val = REG_RD(bp, reg_offset);
2590                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2591                 REG_WR(bp, reg_offset, val);
2592
2593                 BNX2X_ERR("SPIO5 hw attention\n");
2594
2595                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2596                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2597                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2598                         /* Fan failure attention */
2599
2600                         /* The PHY reset is controlled by GPIO 1 */
2601                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2602                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2603                         /* Low power mode is controlled by GPIO 2 */
2604                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2605                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2606                         /* mark the failure */
2607                         bp->link_params.ext_phy_config &=
2608                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2609                         bp->link_params.ext_phy_config |=
2610                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2611                         SHMEM_WR(bp,
2612                                  dev_info.port_hw_config[port].
2613                                                         external_phy_config,
2614                                  bp->link_params.ext_phy_config);
2615                         /* log the failure */
2616                         printk(KERN_ERR PFX "Fan Failure on Network"
2617                                " Controller %s has caused the driver to"
2618                                " shut down the card to prevent permanent"
2619                                " damage.  Please contact Dell Support for"
2620                                " assistance\n", bp->dev->name);
2621                         break;
2622
2623                 default:
2624                         break;
2625                 }
2626         }
2627
2628         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2629
2630                 val = REG_RD(bp, reg_offset);
2631                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2632                 REG_WR(bp, reg_offset, val);
2633
2634                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2635                           (attn & HW_INTERRUT_ASSERT_SET_0));
2636                 bnx2x_panic();
2637         }
2638 }
2639
2640 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2641 {
2642         u32 val;
2643
2644         if (attn & BNX2X_DOORQ_ASSERT) {
2645
2646                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2647                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2648                 /* DORQ discard attention */
2649                 if (val & 0x2)
2650                         BNX2X_ERR("FATAL error from DORQ\n");
2651         }
2652
2653         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2654
2655                 int port = BP_PORT(bp);
2656                 int reg_offset;
2657
2658                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2659                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2660
2661                 val = REG_RD(bp, reg_offset);
2662                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2663                 REG_WR(bp, reg_offset, val);
2664
2665                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2666                           (attn & HW_INTERRUT_ASSERT_SET_1));
2667                 bnx2x_panic();
2668         }
2669 }
2670
2671 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2672 {
2673         u32 val;
2674
2675         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2676
2677                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2678                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2679                 /* CFC error attention */
2680                 if (val & 0x2)
2681                         BNX2X_ERR("FATAL error from CFC\n");
2682         }
2683
2684         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2685
2686                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2687                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2688                 /* RQ_USDMDP_FIFO_OVERFLOW */
2689                 if (val & 0x18000)
2690                         BNX2X_ERR("FATAL error from PXP\n");
2691         }
2692
2693         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2694
2695                 int port = BP_PORT(bp);
2696                 int reg_offset;
2697
2698                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2699                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2700
2701                 val = REG_RD(bp, reg_offset);
2702                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2703                 REG_WR(bp, reg_offset, val);
2704
2705                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2706                           (attn & HW_INTERRUT_ASSERT_SET_2));
2707                 bnx2x_panic();
2708         }
2709 }
2710
2711 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2712 {
2713         u32 val;
2714
2715         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2716
2717                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2718                         int func = BP_FUNC(bp);
2719
2720                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2721                         bnx2x__link_status_update(bp);
2722                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2723                                                         DRV_STATUS_PMF)
2724                                 bnx2x_pmf_update(bp);
2725
2726                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2727
2728                         BNX2X_ERR("MC assert!\n");
2729                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2730                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2731                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2732                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2733                         bnx2x_panic();
2734
2735                 } else if (attn & BNX2X_MCP_ASSERT) {
2736
2737                         BNX2X_ERR("MCP assert!\n");
2738                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2739                         bnx2x_fw_dump(bp);
2740
2741                 } else
2742                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2743         }
2744
2745         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2746                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2747                 if (attn & BNX2X_GRC_TIMEOUT) {
2748                         val = CHIP_IS_E1H(bp) ?
2749                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2750                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2751                 }
2752                 if (attn & BNX2X_GRC_RSV) {
2753                         val = CHIP_IS_E1H(bp) ?
2754                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2755                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2756                 }
2757                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2758         }
2759 }
2760
2761 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2762 {
2763         struct attn_route attn;
2764         struct attn_route group_mask;
2765         int port = BP_PORT(bp);
2766         int index;
2767         u32 reg_addr;
2768         u32 val;
2769         u32 aeu_mask;
2770
2771         /* need to take the HW lock because the MCP or the other port
2772            might also try to handle this event */
2773         bnx2x_acquire_alr(bp);
2774
2775         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2776         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2777         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2778         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2779         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2780            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2781
2782         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2783                 if (deasserted & (1 << index)) {
2784                         group_mask = bp->attn_group[index];
2785
2786                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2787                            index, group_mask.sig[0], group_mask.sig[1],
2788                            group_mask.sig[2], group_mask.sig[3]);
2789
2790                         bnx2x_attn_int_deasserted3(bp,
2791                                         attn.sig[3] & group_mask.sig[3]);
2792                         bnx2x_attn_int_deasserted1(bp,
2793                                         attn.sig[1] & group_mask.sig[1]);
2794                         bnx2x_attn_int_deasserted2(bp,
2795                                         attn.sig[2] & group_mask.sig[2]);
2796                         bnx2x_attn_int_deasserted0(bp,
2797                                         attn.sig[0] & group_mask.sig[0]);
2798
2799                         if ((attn.sig[0] & group_mask.sig[0] &
2800                                                 HW_PRTY_ASSERT_SET_0) ||
2801                             (attn.sig[1] & group_mask.sig[1] &
2802                                                 HW_PRTY_ASSERT_SET_1) ||
2803                             (attn.sig[2] & group_mask.sig[2] &
2804                                                 HW_PRTY_ASSERT_SET_2))
2805                                 BNX2X_ERR("FATAL HW block parity attention\n");
2806                 }
2807         }
2808
2809         bnx2x_release_alr(bp);
2810
2811         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2812
2813         val = ~deasserted;
2814         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2815            val, reg_addr);
2816         REG_WR(bp, reg_addr, val);
2817
2818         if (~bp->attn_state & deasserted)
2819                 BNX2X_ERR("IGU ERROR\n");
2820
2821         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2822                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2823
2824         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2825         aeu_mask = REG_RD(bp, reg_addr);
2826
2827         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2828            aeu_mask, deasserted);
2829         aeu_mask |= (deasserted & 0xff);
2830         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2831
2832         REG_WR(bp, reg_addr, aeu_mask);
2833         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2834
2835         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2836         bp->attn_state &= ~deasserted;
2837         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2838 }
2839
2840 static void bnx2x_attn_int(struct bnx2x *bp)
2841 {
2842         /* read local copy of bits */
2843         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2844                                                                 attn_bits);
2845         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2846                                                                 attn_bits_ack);
2847         u32 attn_state = bp->attn_state;
2848
2849         /* look for changed bits */
2850         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2851         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
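        /* illustrative example (not driver logic): with attn_bits = 0101b,
         * attn_ack = 0011b and attn_state = 0011b, bit 2 was newly raised
         * (asserted = 0100b) and bit 1 just went away (deasserted = 0010b);
         * bit 0 is steady and shows up in neither mask
         */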
2852
2853         DP(NETIF_MSG_HW,
2854            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2855            attn_bits, attn_ack, asserted, deasserted);
2856
2857         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2858                 BNX2X_ERR("BAD attention state\n");
2859
2860         /* handle bits that were raised */
2861         if (asserted)
2862                 bnx2x_attn_int_asserted(bp, asserted);
2863
2864         if (deasserted)
2865                 bnx2x_attn_int_deasserted(bp, deasserted);
2866 }
2867
2868 static void bnx2x_sp_task(struct work_struct *work)
2869 {
2870         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2871         u16 status;
2872
2873
2874         /* Return here if interrupt is disabled */
2875         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2876                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2877                 return;
2878         }
2879
2880         status = bnx2x_update_dsb_idx(bp);
2881 /*      if (status == 0)                                     */
2882 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2883
2884         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2885
2886         /* HW attentions */
2887         if (status & 0x1)
2888                 bnx2x_attn_int(bp);
2889
2890         /* CStorm events: query_stats, port delete ramrod */
2891         if (status & 0x2)
2892                 bp->stats_pending = 0;
2893
2894         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2895                      IGU_INT_NOP, 1);
2896         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2897                      IGU_INT_NOP, 1);
2898         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2899                      IGU_INT_NOP, 1);
2900         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2901                      IGU_INT_NOP, 1);
2902         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2903                      IGU_INT_ENABLE, 1);
2904
2905 }
2906
2907 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2908 {
2909         struct net_device *dev = dev_instance;
2910         struct bnx2x *bp = netdev_priv(dev);
2911
2912         /* Return here if interrupt is disabled */
2913         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2914                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2915                 return IRQ_HANDLED;
2916         }
2917
2918         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2919
2920 #ifdef BNX2X_STOP_ON_ERROR
2921         if (unlikely(bp->panic))
2922                 return IRQ_HANDLED;
2923 #endif
2924
2925         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2926
2927         return IRQ_HANDLED;
2928 }
2929
2930 /* end of slow path */
2931
2932 /* Statistics */
2933
2934 /****************************************************************************
2935 * Macros
2936 ****************************************************************************/
2937
2938 /* sum[hi:lo] += add[hi:lo] */
2939 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2940         do { \
2941                 s_lo += a_lo; \
2942                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2943         } while (0)
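/* worked example (illustrative): adding a[hi:lo] = 0x0:0x1 to
 * s[hi:lo] = 0x0:0xffffffff wraps s_lo to 0; s_lo < a_lo detects the
 * carry, so s_hi becomes 1 and the 64-bit sum is 0x1_00000000
 */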
2944
2945 /* difference = minuend - subtrahend */
2946 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2947         do { \
2948                 if (m_lo < s_lo) { \
2949                         /* underflow */ \
2950                         d_hi = m_hi - s_hi; \
2951                         if (d_hi > 0) { \
2952                                 /* we can 'loan' 1 */ \
2953                                 d_hi--; \
2954                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2955                         } else { \
2956                                 /* m_hi <= s_hi */ \
2957                                 d_hi = 0; \
2958                                 d_lo = 0; \
2959                         } \
2960                 } else { \
2961                         /* m_lo >= s_lo */ \
2962                         if (m_hi < s_hi) { \
2963                                 d_hi = 0; \
2964                                 d_lo = 0; \
2965                         } else { \
2966                                 /* m_hi >= s_hi */ \
2967                                 d_hi = m_hi - s_hi; \
2968                                 d_lo = m_lo - s_lo; \
2969                         } \
2970                 } \
2971         } while (0)
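/* worked example (illustrative): m[hi:lo] = 0x1:0x0 minus s[hi:lo] =
 * 0x0:0x1 underflows the low word; one is borrowed from d_hi (1 -> 0)
 * and d_lo = 0x0 + (0xffffffff - 0x1) + 1 = 0xffffffff.  If the
 * subtrahend exceeds the minuend, the result clamps to 0 instead of
 * wrapping
 */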
2972
2973 #define UPDATE_STAT64(s, t) \
2974         do { \
2975                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2976                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2977                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2978                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2979                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2980                        pstats->mac_stx[1].t##_lo, diff.lo); \
2981         } while (0)
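/* mac_stx[0] latches the most recent raw HW snapshot (so the next
 * delta can be taken against it) while mac_stx[1] accumulates the
 * running 64-bit totals
 */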
2982
2983 #define UPDATE_STAT64_NIG(s, t) \
2984         do { \
2985                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2986                         diff.lo, new->s##_lo, old->s##_lo); \
2987                 ADD_64(estats->t##_hi, diff.hi, \
2988                        estats->t##_lo, diff.lo); \
2989         } while (0)
2990
2991 /* sum[hi:lo] += add */
2992 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2993         do { \
2994                 s_lo += a; \
2995                 s_hi += (s_lo < a) ? 1 : 0; \
2996         } while (0)
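/* same carry trick as ADD_64 but extends a plain 32-bit addend into a
 * 64-bit sum: e.g. (illustrative) s = 0x0:0xfffffffe plus a = 0x3
 * wraps s_lo to 0x1 and carries into s_hi
 */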
2997
2998 #define UPDATE_EXTEND_STAT(s) \
2999         do { \
3000                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3001                               pstats->mac_stx[1].s##_lo, \
3002                               new->s); \
3003         } while (0)
3004
3005 #define UPDATE_EXTEND_TSTAT(s, t) \
3006         do { \
3007                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3008                 old_tclient->s = le32_to_cpu(tclient->s); \
3009                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3010         } while (0)
3011
3012 #define UPDATE_EXTEND_XSTAT(s, t) \
3013         do { \
3014                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3015                 old_xclient->s = le32_to_cpu(xclient->s); \
3016                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3017         } while (0)
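/* the u32 delta (new - old) in the two macros above is wraparound-safe:
 * even if the storm counter wrapped past zero since the last read,
 * unsigned arithmetic still yields the right delta (illustratively,
 * 0x00000002 - 0xfffffffe = 4)
 */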
3018
3019 /*
3020  * General service functions
3021  */
3022
3023 static inline long bnx2x_hilo(u32 *hiref)
3024 {
3025         u32 lo = *(hiref + 1);
3026 #if (BITS_PER_LONG == 64)
3027         u32 hi = *hiref;
3028
3029         return HILO_U64(hi, lo);
3030 #else
3031         return lo;
3032 #endif
3033 }
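/* note: on 32-bit kernels this deliberately reports only the low 32
 * bits, since struct net_device_stats counters are unsigned long;
 * very large counters appear truncated there
 */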
3034
3035 /*
3036  * Init service functions
3037  */
3038
3039 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3040 {
3041         if (!bp->stats_pending) {
3042                 struct eth_query_ramrod_data ramrod_data = {0};
3043                 int rc;
3044
3045                 ramrod_data.drv_counter = bp->stats_counter++;
3046                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3047                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3048
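                /* ramrod_data is 64 bits wide; it is handed to the SPQ
                 * as two u32 halves in the slowpath post below */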
3049                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3050                                    ((u32 *)&ramrod_data)[1],
3051                                    ((u32 *)&ramrod_data)[0], 0);
3052                 if (rc == 0) {
3053                         /* stats ramrod has its own slot on the spq */
3054                         bp->spq_left++;
3055                         bp->stats_pending = 1;
3056                 }
3057         }
3058 }
3059
3060 static void bnx2x_stats_init(struct bnx2x *bp)
3061 {
3062         int port = BP_PORT(bp);
3063
3064         bp->executer_idx = 0;
3065         bp->stats_counter = 0;
3066
3067         /* port stats */
3068         if (!BP_NOMCP(bp))
3069                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3070         else
3071                 bp->port.port_stx = 0;
3072         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3073
3074         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3075         bp->port.old_nig_stats.brb_discard =
3076                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3077         bp->port.old_nig_stats.brb_truncate =
3078                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3079         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3080                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3081         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3082                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3083
3084         /* function stats */
3085         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3086         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3087         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3088         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3089
3090         bp->stats_state = STATS_STATE_DISABLED;
3091         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3092                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3093 }
3094
3095 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3096 {
3097         struct dmae_command *dmae = &bp->stats_dmae;
3098         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3099
3100         *stats_comp = DMAE_COMP_VAL;
3101
3102         /* loader */
3103         if (bp->executer_idx) {
3104                 int loader_idx = PMF_DMAE_C(bp);
3105
3106                 memset(dmae, 0, sizeof(struct dmae_command));
3107
3108                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3109                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3110                                 DMAE_CMD_DST_RESET |
3111 #ifdef __BIG_ENDIAN
3112                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3113 #else
3114                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3115 #endif
3116                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3117                                                DMAE_CMD_PORT_0) |
3118                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3119                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3120                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3121                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3122                                      sizeof(struct dmae_command) *
3123                                      (loader_idx + 1)) >> 2;
3124                 dmae->dst_addr_hi = 0;
3125                 dmae->len = sizeof(struct dmae_command) >> 2;
3126                 if (CHIP_IS_E1(bp))
3127                         dmae->len--;
3128                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3129                 dmae->comp_addr_hi = 0;
3130                 dmae->comp_val = 1;
3131
3132                 *stats_comp = 0;
3133                 bnx2x_post_dmae(bp, dmae, loader_idx);
3134
3135         } else if (bp->func_stx) {
3136                 *stats_comp = 0;
3137                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3138         }
3139 }
3140
3141 static int bnx2x_stats_comp(struct bnx2x *bp)
3142 {
3143         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3144         int cnt = 10;
3145
3146         might_sleep();
3147         while (*stats_comp != DMAE_COMP_VAL) {
3148                 if (!cnt) {
3149                         BNX2X_ERR("timeout waiting for stats to finish\n");
3150                         break;
3151                 }
3152                 cnt--;
3153                 msleep(1);
3154         }
3155         return 1;
3156 }
3157
3158 /*
3159  * Statistics service functions
3160  */
3161
3162 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3163 {
3164         struct dmae_command *dmae;
3165         u32 opcode;
3166         int loader_idx = PMF_DMAE_C(bp);
3167         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3168
3169         /* sanity */
3170         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3171                 BNX2X_ERR("BUG!\n");
3172                 return;
3173         }
3174
3175         bp->executer_idx = 0;
3176
3177         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3178                   DMAE_CMD_C_ENABLE |
3179                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3180 #ifdef __BIG_ENDIAN
3181                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3182 #else
3183                   DMAE_CMD_ENDIANITY_DW_SWAP |
3184 #endif
3185                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3186                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3187
3188         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3189         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3190         dmae->src_addr_lo = bp->port.port_stx >> 2;
3191         dmae->src_addr_hi = 0;
3192         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3193         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3194         dmae->len = DMAE_LEN32_RD_MAX;
3195         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3196         dmae->comp_addr_hi = 0;
3197         dmae->comp_val = 1;
3198
3199         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3200         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3201         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3202         dmae->src_addr_hi = 0;
3203         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3204                                    DMAE_LEN32_RD_MAX * 4);
3205         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3206                                    DMAE_LEN32_RD_MAX * 4);
3207         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3208         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3209         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3210         dmae->comp_val = DMAE_COMP_VAL;
3211
3212         *stats_comp = 0;
3213         bnx2x_hw_stats_post(bp);
3214         bnx2x_stats_comp(bp);
3215 }
3216
3217 static void bnx2x_port_stats_init(struct bnx2x *bp)
3218 {
3219         struct dmae_command *dmae;
3220         int port = BP_PORT(bp);
3221         int vn = BP_E1HVN(bp);
3222         u32 opcode;
3223         int loader_idx = PMF_DMAE_C(bp);
3224         u32 mac_addr;
3225         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3226
3227         /* sanity */
3228         if (!bp->link_vars.link_up || !bp->port.pmf) {
3229                 BNX2X_ERR("BUG!\n");
3230                 return;
3231         }
3232
3233         bp->executer_idx = 0;
3234
3235         /* MCP */
3236         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3237                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3238                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3239 #ifdef __BIG_ENDIAN
3240                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3241 #else
3242                   DMAE_CMD_ENDIANITY_DW_SWAP |
3243 #endif
3244                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3245                   (vn << DMAE_CMD_E1HVN_SHIFT));
3246
3247         if (bp->port.port_stx) {
3248
3249                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250                 dmae->opcode = opcode;
3251                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3252                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3253                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3254                 dmae->dst_addr_hi = 0;
3255                 dmae->len = sizeof(struct host_port_stats) >> 2;
3256                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3257                 dmae->comp_addr_hi = 0;
3258                 dmae->comp_val = 1;
3259         }
3260
3261         if (bp->func_stx) {
3262
3263                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264                 dmae->opcode = opcode;
3265                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3266                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3267                 dmae->dst_addr_lo = bp->func_stx >> 2;
3268                 dmae->dst_addr_hi = 0;
3269                 dmae->len = sizeof(struct host_func_stats) >> 2;
3270                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3271                 dmae->comp_addr_hi = 0;
3272                 dmae->comp_val = 1;
3273         }
3274
3275         /* MAC */
3276         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3277                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3278                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3279 #ifdef __BIG_ENDIAN
3280                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3281 #else
3282                   DMAE_CMD_ENDIANITY_DW_SWAP |
3283 #endif
3284                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3285                   (vn << DMAE_CMD_E1HVN_SHIFT));
3286
3287         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3288
3289                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3290                                    NIG_REG_INGRESS_BMAC0_MEM);
3291
3292                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3293                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3294                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3295                 dmae->opcode = opcode;
3296                 dmae->src_addr_lo = (mac_addr +
3297                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3298                 dmae->src_addr_hi = 0;
3299                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3300                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3301                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3302                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3303                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3304                 dmae->comp_addr_hi = 0;
3305                 dmae->comp_val = 1;
3306
3307                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3308                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3309                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3310                 dmae->opcode = opcode;
3311                 dmae->src_addr_lo = (mac_addr +
3312                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3313                 dmae->src_addr_hi = 0;
3314                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3315                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3316                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3317                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3318                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3319                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3320                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3321                 dmae->comp_addr_hi = 0;
3322                 dmae->comp_val = 1;
3323
3324         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3325
3326                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3327
3328                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3329                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330                 dmae->opcode = opcode;
3331                 dmae->src_addr_lo = (mac_addr +
3332                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3333                 dmae->src_addr_hi = 0;
3334                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3335                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3336                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3337                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3338                 dmae->comp_addr_hi = 0;
3339                 dmae->comp_val = 1;
3340
3341                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3342                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3343                 dmae->opcode = opcode;
3344                 dmae->src_addr_lo = (mac_addr +
3345                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3346                 dmae->src_addr_hi = 0;
3347                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3348                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3349                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3350                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3351                 dmae->len = 1;
3352                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3353                 dmae->comp_addr_hi = 0;
3354                 dmae->comp_val = 1;
3355
3356                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3357                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3358                 dmae->opcode = opcode;
3359                 dmae->src_addr_lo = (mac_addr +
3360                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3361                 dmae->src_addr_hi = 0;
3362                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3363                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3364                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3365                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3366                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3367                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3368                 dmae->comp_addr_hi = 0;
3369                 dmae->comp_val = 1;
3370         }
3371
3372         /* NIG */
3373         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3374         dmae->opcode = opcode;
3375         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3376                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3377         dmae->src_addr_hi = 0;
3378         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3379         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3380         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3381         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3382         dmae->comp_addr_hi = 0;
3383         dmae->comp_val = 1;
3384
3385         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3386         dmae->opcode = opcode;
3387         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3388                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3389         dmae->src_addr_hi = 0;
3390         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3391                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3392         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3393                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3394         dmae->len = (2*sizeof(u32)) >> 2;
3395         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396         dmae->comp_addr_hi = 0;
3397         dmae->comp_val = 1;
3398
3399         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3400         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3401                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3402                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3403 #ifdef __BIG_ENDIAN
3404                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3405 #else
3406                         DMAE_CMD_ENDIANITY_DW_SWAP |
3407 #endif
3408                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3409                         (vn << DMAE_CMD_E1HVN_SHIFT));
3410         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3411                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3412         dmae->src_addr_hi = 0;
3413         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3414                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3415         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3416                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3417         dmae->len = (2*sizeof(u32)) >> 2;
3418         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3419         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3420         dmae->comp_val = DMAE_COMP_VAL;
3421
3422         *stats_comp = 0;
3423 }
3424
3425 static void bnx2x_func_stats_init(struct bnx2x *bp)
3426 {
3427         struct dmae_command *dmae = &bp->stats_dmae;
3428         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3429
3430         /* sanity */
3431         if (!bp->func_stx) {
3432                 BNX2X_ERR("BUG!\n");
3433                 return;
3434         }
3435
3436         bp->executer_idx = 0;
3437         memset(dmae, 0, sizeof(struct dmae_command));
3438
3439         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3440                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3441                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3442 #ifdef __BIG_ENDIAN
3443                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3444 #else
3445                         DMAE_CMD_ENDIANITY_DW_SWAP |
3446 #endif
3447                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3448                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3449         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3450         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3451         dmae->dst_addr_lo = bp->func_stx >> 2;
3452         dmae->dst_addr_hi = 0;
3453         dmae->len = sizeof(struct host_func_stats) >> 2;
3454         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3455         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3456         dmae->comp_val = DMAE_COMP_VAL;
3457
3458         *stats_comp = 0;
3459 }
3460
3461 static void bnx2x_stats_start(struct bnx2x *bp)
3462 {
3463         if (bp->port.pmf)
3464                 bnx2x_port_stats_init(bp);
3465
3466         else if (bp->func_stx)
3467                 bnx2x_func_stats_init(bp);
3468
3469         bnx2x_hw_stats_post(bp);
3470         bnx2x_storm_stats_post(bp);
3471 }
3472
3473 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3474 {
3475         bnx2x_stats_comp(bp);
3476         bnx2x_stats_pmf_update(bp);
3477         bnx2x_stats_start(bp);
3478 }
3479
3480 static void bnx2x_stats_restart(struct bnx2x *bp)
3481 {
3482         bnx2x_stats_comp(bp);
3483         bnx2x_stats_start(bp);
3484 }
3485
3486 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3487 {
3488         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3489         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3490         struct regpair diff;
3491
3492         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3493         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3494         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3495         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3496         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3497         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3498         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3499         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3500         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3501         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3502         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3503         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3504         UPDATE_STAT64(tx_stat_gt127,
3505                                 tx_stat_etherstatspkts65octetsto127octets);
3506         UPDATE_STAT64(tx_stat_gt255,
3507                                 tx_stat_etherstatspkts128octetsto255octets);
3508         UPDATE_STAT64(tx_stat_gt511,
3509                                 tx_stat_etherstatspkts256octetsto511octets);
3510         UPDATE_STAT64(tx_stat_gt1023,
3511                                 tx_stat_etherstatspkts512octetsto1023octets);
3512         UPDATE_STAT64(tx_stat_gt1518,
3513                                 tx_stat_etherstatspkts1024octetsto1522octets);
3514         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3515         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3516         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3517         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3518         UPDATE_STAT64(tx_stat_gterr,
3519                                 tx_stat_dot3statsinternalmactransmiterrors);
3520         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3521 }
3522
3523 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3524 {
3525         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3526         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3527
3528         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3529         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3530         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3531         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3532         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3533         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3534         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3535         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3536         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3537         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3538         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3539         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3540         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3541         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3542         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3543         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3544         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3545         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3546         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3547         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3548         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3549         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3550         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3551         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3552         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3553         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3554         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3555         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3556         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3557         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3558         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3559 }
3560
3561 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3562 {
3563         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3564         struct nig_stats *old = &(bp->port.old_nig_stats);
3565         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3566         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3567         struct regpair diff;
3568
3569         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3570                 bnx2x_bmac_stats_update(bp);
3571
3572         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3573                 bnx2x_emac_stats_update(bp);
3574
3575         else { /* unreached */
3576                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3577                 return -1;
3578         }
3579
3580         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3581                       new->brb_discard - old->brb_discard);
3582         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3583                       new->brb_truncate - old->brb_truncate);
3584
3585         UPDATE_STAT64_NIG(egress_mac_pkt0,
3586                                         etherstatspkts1024octetsto1522octets);
3587         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3588
3589         memcpy(old, new, sizeof(struct nig_stats));
3590
3591         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3592                sizeof(struct mac_stx));
3593         estats->brb_drop_hi = pstats->brb_drop_hi;
3594         estats->brb_drop_lo = pstats->brb_drop_lo;
3595
3596         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3597
3598         return 0;
3599 }
3600
3601 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3602 {
3603         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3604         int cl_id = BP_CL_ID(bp);
3605         struct tstorm_per_port_stats *tport =
3606                                 &stats->tstorm_common.port_statistics;
3607         struct tstorm_per_client_stats *tclient =
3608                         &stats->tstorm_common.client_statistics[cl_id];
3609         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3610         struct xstorm_per_client_stats *xclient =
3611                         &stats->xstorm_common.client_statistics[cl_id];
3612         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3613         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3614         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3615         u32 diff;
3616
3617         /* are storm stats valid? */
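        /* bnx2x_storm_stats_post() passes drv_counter and then increments
         * bp->stats_counter, so a storm that serviced the latest query
         * reports back (bp->stats_counter - 1)
         */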
3618         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3619                                                         bp->stats_counter) {
3620                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3621                    "  tstorm counter (%d) != stats_counter (%d)\n",
3622                    tclient->stats_counter, bp->stats_counter);
3623                 return -1;
3624         }
3625         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3626                                                         bp->stats_counter) {
3627                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3628                    "  xstorm counter (%d) != stats_counter (%d)\n",
3629                    xclient->stats_counter, bp->stats_counter);
3630                 return -2;
3631         }
3632
3633         fstats->total_bytes_received_hi =
3634         fstats->valid_bytes_received_hi =
3635                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3636         fstats->total_bytes_received_lo =
3637         fstats->valid_bytes_received_lo =
3638                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3639
3640         estats->error_bytes_received_hi =
3641                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3642         estats->error_bytes_received_lo =
3643                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3644         ADD_64(estats->error_bytes_received_hi,
3645                estats->rx_stat_ifhcinbadoctets_hi,
3646                estats->error_bytes_received_lo,
3647                estats->rx_stat_ifhcinbadoctets_lo);
3648
3649         ADD_64(fstats->total_bytes_received_hi,
3650                estats->error_bytes_received_hi,
3651                fstats->total_bytes_received_lo,
3652                estats->error_bytes_received_lo);
3653
3654         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3655         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3656                                 total_multicast_packets_received);
3657         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3658                                 total_broadcast_packets_received);
3659
3660         fstats->total_bytes_transmitted_hi =
3661                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3662         fstats->total_bytes_transmitted_lo =
3663                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3664
3665         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3666                                 total_unicast_packets_transmitted);
3667         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3668                                 total_multicast_packets_transmitted);
3669         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3670                                 total_broadcast_packets_transmitted);
3671
3672         memcpy(estats, &(fstats->total_bytes_received_hi),
3673                sizeof(struct host_func_stats) - 2*sizeof(u32));
3674
3675         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3676         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3677         estats->brb_truncate_discard =
3678                                 le32_to_cpu(tport->brb_truncate_discard);
3679         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3680
3681         old_tclient->rcv_unicast_bytes.hi =
3682                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3683         old_tclient->rcv_unicast_bytes.lo =
3684                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3685         old_tclient->rcv_broadcast_bytes.hi =
3686                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3687         old_tclient->rcv_broadcast_bytes.lo =
3688                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3689         old_tclient->rcv_multicast_bytes.hi =
3690                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3691         old_tclient->rcv_multicast_bytes.lo =
3692                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3693         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3694
3695         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3696         old_tclient->packets_too_big_discard =
3697                                 le32_to_cpu(tclient->packets_too_big_discard);
3698         estats->no_buff_discard =
3699         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3700         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3701
3702         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3703         old_xclient->unicast_bytes_sent.hi =
3704                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3705         old_xclient->unicast_bytes_sent.lo =
3706                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3707         old_xclient->multicast_bytes_sent.hi =
3708                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3709         old_xclient->multicast_bytes_sent.lo =
3710                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3711         old_xclient->broadcast_bytes_sent.hi =
3712                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3713         old_xclient->broadcast_bytes_sent.lo =
3714                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3715
3716         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3717
3718         return 0;
3719 }
3720
3721 static void bnx2x_net_stats_update(struct bnx2x *bp)
3722 {
3723         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3724         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3725         struct net_device_stats *nstats = &bp->dev->stats;
3726
3727         nstats->rx_packets =
3728                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3729                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3730                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3731
3732         nstats->tx_packets =
3733                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3734                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3735                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3736
3737         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3738
3739         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3740
3741         nstats->rx_dropped = old_tclient->checksum_discard +
3742                              estats->mac_discard;
3743         nstats->tx_dropped = 0;
3744
3745         nstats->multicast =
3746                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3747
3748         nstats->collisions =
3749                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3750                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3751                         estats->tx_stat_dot3statslatecollisions_lo +
3752                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3753
3754         estats->jabber_packets_received =
3755                                 old_tclient->packets_too_big_discard +
3756                                 estats->rx_stat_dot3statsframestoolong_lo;
3757
3758         nstats->rx_length_errors =
3759                                 estats->rx_stat_etherstatsundersizepkts_lo +
3760                                 estats->jabber_packets_received;
3761         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3762         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3763         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3764         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3765         nstats->rx_missed_errors = estats->xxoverflow_discard;
3766
3767         nstats->rx_errors = nstats->rx_length_errors +
3768                             nstats->rx_over_errors +
3769                             nstats->rx_crc_errors +
3770                             nstats->rx_frame_errors +
3771                             nstats->rx_fifo_errors +
3772                             nstats->rx_missed_errors;
3773
3774         nstats->tx_aborted_errors =
3775                         estats->tx_stat_dot3statslatecollisions_lo +
3776                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3777         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3778         nstats->tx_fifo_errors = 0;
3779         nstats->tx_heartbeat_errors = 0;
3780         nstats->tx_window_errors = 0;
3781
3782         nstats->tx_errors = nstats->tx_aborted_errors +
3783                             nstats->tx_carrier_errors;
3784 }
3785
3786 static void bnx2x_stats_update(struct bnx2x *bp)
3787 {
3788         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3789         int update = 0;
3790
3791         if (*stats_comp != DMAE_COMP_VAL)
3792                 return;
3793
3794         if (bp->port.pmf)
3795                 update = (bnx2x_hw_stats_update(bp) == 0);
3796
3797         update |= (bnx2x_storm_stats_update(bp) == 0);
3798
3799         if (update)
3800                 bnx2x_net_stats_update(bp);
3801
3802         else {
3803                 if (bp->stats_pending) {
3804                         bp->stats_pending++;
3805                         if (bp->stats_pending == 3) {
3806                                 BNX2X_ERR("stats not updated 3 times in a row\n");
3807                                 bnx2x_panic();
3808                                 return;
3809                         }
3810                 }
3811         }
3812
3813         if (bp->msglevel & NETIF_MSG_TIMER) {
3814                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3815                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3816                 struct net_device_stats *nstats = &bp->dev->stats;
3817                 int i;
3818
3819                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3820                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3821                                   "  tx pkt (%lx)\n",
3822                        bnx2x_tx_avail(bp->fp),
3823                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3824                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3825                                   "  rx pkt (%lx)\n",
3826                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3827                              bp->fp->rx_comp_cons),
3828                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3829                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3830                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3831                        estats->driver_xoff, estats->brb_drop_lo);
3832                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3833                         "packets_too_big_discard %u  no_buff_discard %u  "
3834                         "mac_discard %u  mac_filter_discard %u  "
3835                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3836                         "ttl0_discard %u\n",
3837                        old_tclient->checksum_discard,
3838                        old_tclient->packets_too_big_discard,
3839                        old_tclient->no_buff_discard, estats->mac_discard,
3840                        estats->mac_filter_discard, estats->xxoverflow_discard,
3841                        estats->brb_truncate_discard,
3842                        old_tclient->ttl0_discard);
3843
3844                 for_each_queue(bp, i) {
3845                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3846                                bnx2x_fp(bp, i, tx_pkt),
3847                                bnx2x_fp(bp, i, rx_pkt),
3848                                bnx2x_fp(bp, i, rx_calls));
3849                 }
3850         }
3851
3852         bnx2x_hw_stats_post(bp);
3853         bnx2x_storm_stats_post(bp);
3854 }
3855
3856 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3857 {
3858         struct dmae_command *dmae;
3859         u32 opcode;
3860         int loader_idx = PMF_DMAE_C(bp);
3861         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3862
3863         bp->executer_idx = 0;
3864
3865         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3866                   DMAE_CMD_C_ENABLE |
3867                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3868 #ifdef __BIG_ENDIAN
3869                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3870 #else
3871                   DMAE_CMD_ENDIANITY_DW_SWAP |
3872 #endif
3873                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3874                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3875
3876         if (bp->port.port_stx) {
3877
3878                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3879                 if (bp->func_stx)
3880                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3881                 else
3882                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3883                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3884                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3885                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3886                 dmae->dst_addr_hi = 0;
3887                 dmae->len = sizeof(struct host_port_stats) >> 2;
3888                 if (bp->func_stx) {
3889                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3890                         dmae->comp_addr_hi = 0;
3891                         dmae->comp_val = 1;
3892                 } else {
3893                         dmae->comp_addr_lo =
3894                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3895                         dmae->comp_addr_hi =
3896                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3897                         dmae->comp_val = DMAE_COMP_VAL;
3898
3899                         *stats_comp = 0;
3900                 }
3901         }
3902
3903         if (bp->func_stx) {
3904
3905                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3906                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3907                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3908                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3909                 dmae->dst_addr_lo = bp->func_stx >> 2;
3910                 dmae->dst_addr_hi = 0;
3911                 dmae->len = sizeof(struct host_func_stats) >> 2;
3912                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3913                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3914                 dmae->comp_val = DMAE_COMP_VAL;
3915
3916                 *stats_comp = 0;
3917         }
3918 }
3919
3920 static void bnx2x_stats_stop(struct bnx2x *bp)
3921 {
3922         int update = 0;
3923
3924         bnx2x_stats_comp(bp);
3925
3926         if (bp->port.pmf)
3927                 update = (bnx2x_hw_stats_update(bp) == 0);
3928
3929         update |= (bnx2x_storm_stats_update(bp) == 0);
3930
3931         if (update) {
3932                 bnx2x_net_stats_update(bp);
3933
3934                 if (bp->port.pmf)
3935                         bnx2x_port_stats_stop(bp);
3936
3937                 bnx2x_hw_stats_post(bp);
3938                 bnx2x_stats_comp(bp);
3939         }
3940 }
3941
3942 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3943 {
3944 }
3945
3946 static const struct {
3947         void (*action)(struct bnx2x *bp);
3948         enum bnx2x_stats_state next_state;
3949 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3950 /* state        event   */
3951 {
3952 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3953 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3954 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3955 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3956 },
3957 {
3958 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3959 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3960 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3961 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3962 }
3963 };
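/* example walk (illustrative): a STATS_EVENT_STOP arriving in
 * STATS_STATE_ENABLED runs bnx2x_stats_stop() and moves the machine to
 * STATS_STATE_DISABLED, where further UPDATE events fall through to
 * bnx2x_stats_do_nothing()
 */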
3964
3965 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3966 {
3967         enum bnx2x_stats_state state = bp->stats_state;
3968
3969         bnx2x_stats_stm[state][event].action(bp);
3970         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3971
3972         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3973                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3974                    state, event, bp->stats_state);
3975 }
3976
3977 static void bnx2x_timer(unsigned long data)
3978 {
3979         struct bnx2x *bp = (struct bnx2x *) data;
3980
3981         if (!netif_running(bp->dev))
3982                 return;
3983
3984         if (atomic_read(&bp->intr_sem) != 0)
3985                 goto timer_restart;
3986
3987         if (poll) {
3988                 struct bnx2x_fastpath *fp = &bp->fp[0];
3989                 int rc;
3990
3991                 bnx2x_tx_int(fp, 1000);
3992                 rc = bnx2x_rx_int(fp, 1000);
3993         }
3994
3995         if (!BP_NOMCP(bp)) {
3996                 int func = BP_FUNC(bp);
3997                 u32 drv_pulse;
3998                 u32 mcp_pulse;
3999
4000                 ++bp->fw_drv_pulse_wr_seq;
4001                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4002                 /* TBD - add SYSTEM_TIME */
4003                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4004                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4005
4006                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4007                              MCP_PULSE_SEQ_MASK);
4008                 /* The delta between driver pulse and mcp response
4009                  * should be 1 (before mcp response) or 0 (after mcp response)
4010                  */
4011                 if ((drv_pulse != mcp_pulse) &&
4012                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4013                         /* someone lost a heartbeat... */
4014                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4015                                   drv_pulse, mcp_pulse);
4016                 }
4017         }
4018
4019         if ((bp->state == BNX2X_STATE_OPEN) ||
4020             (bp->state == BNX2X_STATE_DISABLED))
4021                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4022
4023 timer_restart:
4024         mod_timer(&bp->timer, jiffies + bp->current_interval);
4025 }
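
/* The driver/MCP handshake in bnx2x_timer() is a modular sequence
 * number heartbeat: the driver advances drv_pulse every tick and the
 * MCP echoes it back.  Modulo the sequence mask, the delta must be 0
 * (MCP already answered) or 1 (answer still pending); anything else
 * means a lost heartbeat.  A minimal equivalent of the check above:
 *
 *      u32 delta = (drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK;
 *      if (delta > 1)
 *              BNX2X_ERR("heartbeat lost\n");     (sketch only)
 */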
4026
4027 /* end of Statistics */
4028
4029 /* nic init */
4030
4031 /*
4032  * nic init service functions
4033  */
4034
4035 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4036 {
4037         int port = BP_PORT(bp);
4038
4039         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4040                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4041                         sizeof(struct ustorm_status_block)/4);
4042         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4043                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4044                         sizeof(struct cstorm_status_block)/4);
4045 }
4046
4047 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4048                           dma_addr_t mapping, int sb_id)
4049 {
4050         int port = BP_PORT(bp);
4051         int func = BP_FUNC(bp);
4052         int index;
4053         u64 section;
4054
4055         /* USTORM */
4056         section = ((u64)mapping) + offsetof(struct host_status_block,
4057                                             u_status_block);
4058         sb->u_status_block.status_block_id = sb_id;
4059
4060         REG_WR(bp, BAR_USTRORM_INTMEM +
4061                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4062         REG_WR(bp, BAR_USTRORM_INTMEM +
4063                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4064                U64_HI(section));
4065         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4066                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4067
4068         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4069                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4070                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4071
4072         /* CSTORM */
4073         section = ((u64)mapping) + offsetof(struct host_status_block,
4074                                             c_status_block);
4075         sb->c_status_block.status_block_id = sb_id;
4076
4077         REG_WR(bp, BAR_CSTRORM_INTMEM +
4078                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4079         REG_WR(bp, BAR_CSTRORM_INTMEM +
4080                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4081                U64_HI(section));
4082         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4083                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4084
4085         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4086                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4087                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4088
4089         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4090 }
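
/* Status block sections are 64-bit DMA addresses programmed into two
 * adjacent 32-bit STORM registers, low word first, via the U64_LO()/
 * U64_HI() helpers; the same split-write pattern recurs throughout
 * the init code below.  Schematically:
 *
 *      REG_WR(bp, addr_off, U64_LO(section));       bits 31:0
 *      REG_WR(bp, addr_off + 4, U64_HI(section));   bits 63:32
 */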
4091
4092 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4093 {
4094         int func = BP_FUNC(bp);
4095
4096         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4097                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4098                         sizeof(struct ustorm_def_status_block)/4);
4099         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4100                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4101                         sizeof(struct cstorm_def_status_block)/4);
4102         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4103                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4104                         sizeof(struct xstorm_def_status_block)/4);
4105         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4106                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4107                         sizeof(struct tstorm_def_status_block)/4);
4108 }
4109
4110 static void bnx2x_init_def_sb(struct bnx2x *bp,
4111                               struct host_def_status_block *def_sb,
4112                               dma_addr_t mapping, int sb_id)
4113 {
4114         int port = BP_PORT(bp);
4115         int func = BP_FUNC(bp);
4116         int index, val, reg_offset;
4117         u64 section;
4118
4119         /* ATTN */
4120         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4121                                             atten_status_block);
4122         def_sb->atten_status_block.status_block_id = sb_id;
4123
4124         bp->attn_state = 0;
4125
4126         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4127                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4128
4129         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4130                 bp->attn_group[index].sig[0] = REG_RD(bp,
4131                                                      reg_offset + 0x10*index);
4132                 bp->attn_group[index].sig[1] = REG_RD(bp,
4133                                                reg_offset + 0x4 + 0x10*index);
4134                 bp->attn_group[index].sig[2] = REG_RD(bp,
4135                                                reg_offset + 0x8 + 0x10*index);
4136                 bp->attn_group[index].sig[3] = REG_RD(bp,
4137                                                reg_offset + 0xc + 0x10*index);
4138         }
4139
4140         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4141                              HC_REG_ATTN_MSG0_ADDR_L);
4142
4143         REG_WR(bp, reg_offset, U64_LO(section));
4144         REG_WR(bp, reg_offset + 4, U64_HI(section));
4145
4146         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4147
4148         val = REG_RD(bp, reg_offset);
4149         val |= sb_id;
4150         REG_WR(bp, reg_offset, val);
4151
4152         /* USTORM */
4153         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4154                                             u_def_status_block);
4155         def_sb->u_def_status_block.status_block_id = sb_id;
4156
4157         REG_WR(bp, BAR_USTRORM_INTMEM +
4158                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4159         REG_WR(bp, BAR_USTRORM_INTMEM +
4160                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4161                U64_HI(section));
4162         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4163                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4164
4165         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4166                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4167                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4168
4169         /* CSTORM */
4170         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4171                                             c_def_status_block);
4172         def_sb->c_def_status_block.status_block_id = sb_id;
4173
4174         REG_WR(bp, BAR_CSTRORM_INTMEM +
4175                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4176         REG_WR(bp, BAR_CSTRORM_INTMEM +
4177                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4178                U64_HI(section));
4179         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4180                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4181
4182         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4183                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4184                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4185
4186         /* TSTORM */
4187         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4188                                             t_def_status_block);
4189         def_sb->t_def_status_block.status_block_id = sb_id;
4190
4191         REG_WR(bp, BAR_TSTRORM_INTMEM +
4192                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4193         REG_WR(bp, BAR_TSTRORM_INTMEM +
4194                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4195                U64_HI(section));
4196         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4197                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4198
4199         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4200                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4201                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4202
4203         /* XSTORM */
4204         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4205                                             x_def_status_block);
4206         def_sb->x_def_status_block.status_block_id = sb_id;
4207
4208         REG_WR(bp, BAR_XSTRORM_INTMEM +
4209                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4210         REG_WR(bp, BAR_XSTRORM_INTMEM +
4211                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4212                U64_HI(section));
4213         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4214                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4215
4216         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4217                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4218                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4219
4220         bp->stats_pending = 0;
4221         bp->set_mac_pending = 0;
4222
4223         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4224 }
4225
4226 static void bnx2x_update_coalesce(struct bnx2x *bp)
4227 {
4228         int port = BP_PORT(bp);
4229         int i;
4230
4231         for_each_queue(bp, i) {
4232                 int sb_id = bp->fp[i].sb_id;
4233
4234                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4235                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4236                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4237                                                     U_SB_ETH_RX_CQ_INDEX),
4238                         bp->rx_ticks/12);
4239                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4240                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4241                                                      U_SB_ETH_RX_CQ_INDEX),
4242                          bp->rx_ticks ? 0 : 1);
4243                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4244                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4245                                                      U_SB_ETH_RX_BD_INDEX),
4246                          bp->rx_ticks ? 0 : 1);
4247
4248                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4249                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4250                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4251                                                     C_SB_ETH_TX_CQ_INDEX),
4252                         bp->tx_ticks/12);
4253                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4254                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4255                                                      C_SB_ETH_TX_CQ_INDEX),
4256                          bp->tx_ticks ? 0 : 1);
4257         }
4258 }
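
/* Coalescing note: rx_ticks/tx_ticks hold the interrupt coalescing
 * interval in microseconds, and the HC timeout byte is written as
 * ticks/12, which suggests the hardware counts in 12-usec units
 * (inferred from the code above, not from documentation).  E.g. a
 * 48-usec setting would program 48 / 12 = 4.  A zero interval does
 * not program 0 but disables the timeout via the HC_DISABLE word.
 */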
4259
4260 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4261                                        struct bnx2x_fastpath *fp, int last)
4262 {
4263         int i;
4264
4265         for (i = 0; i < last; i++) {
4266                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4267                 struct sk_buff *skb = rx_buf->skb;
4268
4269                 if (skb == NULL) {
4270                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4271                         continue;
4272                 }
4273
4274                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4275                         pci_unmap_single(bp->pdev,
4276                                          pci_unmap_addr(rx_buf, mapping),
4277                                          bp->rx_buf_size,
4278                                          PCI_DMA_FROMDEVICE);
4279
4280                 dev_kfree_skb(skb);
4281                 rx_buf->skb = NULL;
4282         }
4283 }
4284
4285 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4286 {
4287         int func = BP_FUNC(bp);
4288         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4289                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4290         u16 ring_prod, cqe_ring_prod;
4291         int i, j;
4292
4293         bp->rx_buf_size = bp->dev->mtu;
4294         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4295                 BCM_RX_ETH_PAYLOAD_ALIGN;
4296
4297         if (bp->flags & TPA_ENABLE_FLAG) {
4298                 DP(NETIF_MSG_IFUP,
4299                    "rx_buf_size %d  effective_mtu %d\n",
4300                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4301
4302                 for_each_queue(bp, j) {
4303                         struct bnx2x_fastpath *fp = &bp->fp[j];
4304
4305                         for (i = 0; i < max_agg_queues; i++) {
4306                                 fp->tpa_pool[i].skb =
4307                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4308                                 if (!fp->tpa_pool[i].skb) {
4309                                         BNX2X_ERR("Failed to allocate TPA "
4310                                                   "skb pool for queue[%d] - "
4311                                                   "disabling TPA on this "
4312                                                   "queue!\n", j);
4313                                         bnx2x_free_tpa_pool(bp, fp, i);
4314                                         fp->disable_tpa = 1;
4315                                         break;
4316                                 }
4317                                 pci_unmap_addr_set((struct sw_rx_bd *)
4318                                                         &fp->tpa_pool[i],
4319                                                    mapping, 0);
4320                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4321                         }
4322                 }
4323         }
4324
4325         for_each_queue(bp, j) {
4326                 struct bnx2x_fastpath *fp = &bp->fp[j];
4327
4328                 fp->rx_bd_cons = 0;
4329                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4330                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4331
4332                 /* "next page" elements initialization */
4333                 /* SGE ring */
4334                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4335                         struct eth_rx_sge *sge;
4336
4337                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4338                         sge->addr_hi =
4339                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4340                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4341                         sge->addr_lo =
4342                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4343                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4344                 }
4345
4346                 bnx2x_init_sge_ring_bit_mask(fp);
4347
4348                 /* RX BD ring */
4349                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4350                         struct eth_rx_bd *rx_bd;
4351
4352                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4353                         rx_bd->addr_hi =
4354                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4355                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4356                         rx_bd->addr_lo =
4357                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4358                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4359                 }
4360
4361                 /* CQ ring */
4362                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4363                         struct eth_rx_cqe_next_page *nextpg;
4364
4365                         nextpg = (struct eth_rx_cqe_next_page *)
4366                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4367                         nextpg->addr_hi =
4368                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4369                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4370                         nextpg->addr_lo =
4371                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4372                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4373                 }
4374
4375                 /* Allocate SGEs and initialize the ring elements */
4376                 for (i = 0, ring_prod = 0;
4377                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4378
4379                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4380                                 BNX2X_ERR("was only able to allocate "
4381                                           "%d rx sges\n", i);
4382                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4383                                 /* Cleanup already allocated elements */
4384                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4385                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4386                                 fp->disable_tpa = 1;
4387                                 ring_prod = 0;
4388                                 break;
4389                         }
4390                         ring_prod = NEXT_SGE_IDX(ring_prod);
4391                 }
4392                 fp->rx_sge_prod = ring_prod;
4393
4394                 /* Allocate BDs and initialize BD ring */
4395                 fp->rx_comp_cons = 0;
4396                 cqe_ring_prod = ring_prod = 0;
4397                 for (i = 0; i < bp->rx_ring_size; i++) {
4398                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4399                                 BNX2X_ERR("was only able to allocate "
4400                                           "%d rx skbs\n", i);
4401                                 bp->eth_stats.rx_skb_alloc_failed++;
4402                                 break;
4403                         }
4404                         ring_prod = NEXT_RX_IDX(ring_prod);
4405                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4406                         WARN_ON(ring_prod <= i);
4407                 }
4408
4409                 fp->rx_bd_prod = ring_prod;
4410                 /* must not have more available CQEs than BDs */
4411                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4412                                        cqe_ring_prod);
4413                 fp->rx_pkt = fp->rx_calls = 0;
4414
4415                 /* Warning!
4416                  * This will generate an interrupt (to the TSTORM);
4417                  * it must only be done after the chip is initialized.
4418                  */
4419                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4420                                      fp->rx_sge_prod);
4421                 if (j != 0)
4422                         continue;
4423
4424                 REG_WR(bp, BAR_USTRORM_INTMEM +
4425                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4426                        U64_LO(fp->rx_comp_mapping));
4427                 REG_WR(bp, BAR_USTRORM_INTMEM +
4428                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4429                        U64_HI(fp->rx_comp_mapping));
4430         }
4431 }
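
/* The rx BD, SGE, CQ (and below, tx) rings span several pages that
 * are chained through trailing "next page" elements: for page i
 * (1-based) the last slots hold the DMA address of page
 * (i % NUM_<ring>_RINGS), so the final page wraps back to the first
 * and the ring is circular.  E.g. with, say, NUM_RX_RINGS = 8,
 * page 8 points at mapping + BCM_PAGE_SIZE * (8 % 8), i.e. page 0.
 */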
4432
4433 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4434 {
4435         int i, j;
4436
4437         for_each_queue(bp, j) {
4438                 struct bnx2x_fastpath *fp = &bp->fp[j];
4439
4440                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4441                         struct eth_tx_bd *tx_bd =
4442                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4443
4444                         tx_bd->addr_hi =
4445                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4446                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4447                         tx_bd->addr_lo =
4448                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4449                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4450                 }
4451
4452                 fp->tx_pkt_prod = 0;
4453                 fp->tx_pkt_cons = 0;
4454                 fp->tx_bd_prod = 0;
4455                 fp->tx_bd_cons = 0;
4456                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4457                 fp->tx_pkt = 0;
4458         }
4459 }
4460
4461 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4462 {
4463         int func = BP_FUNC(bp);
4464
4465         spin_lock_init(&bp->spq_lock);
4466
4467         bp->spq_left = MAX_SPQ_PENDING;
4468         bp->spq_prod_idx = 0;
4469         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4470         bp->spq_prod_bd = bp->spq;
4471         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4472
4473         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4474                U64_LO(bp->spq_mapping));
4475         REG_WR(bp,
4476                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4477                U64_HI(bp->spq_mapping));
4478
4479         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4480                bp->spq_prod_idx);
4481 }
4482
4483 static void bnx2x_init_context(struct bnx2x *bp)
4484 {
4485         int i;
4486
4487         for_each_queue(bp, i) {
4488                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4489                 struct bnx2x_fastpath *fp = &bp->fp[i];
4490                 u8 sb_id = FP_SB_ID(fp);
4491
4492                 context->xstorm_st_context.tx_bd_page_base_hi =
4493                                                 U64_HI(fp->tx_desc_mapping);
4494                 context->xstorm_st_context.tx_bd_page_base_lo =
4495                                                 U64_LO(fp->tx_desc_mapping);
4496                 context->xstorm_st_context.db_data_addr_hi =
4497                                                 U64_HI(fp->tx_prods_mapping);
4498                 context->xstorm_st_context.db_data_addr_lo =
4499                                                 U64_LO(fp->tx_prods_mapping);
4500                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4501                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4502
4503                 context->ustorm_st_context.common.sb_index_numbers =
4504                                                 BNX2X_RX_SB_INDEX_NUM;
4505                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4506                 context->ustorm_st_context.common.status_block_id = sb_id;
4507                 context->ustorm_st_context.common.flags =
4508                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4509                 context->ustorm_st_context.common.mc_alignment_size =
4510                         BCM_RX_ETH_PAYLOAD_ALIGN;
4511                 context->ustorm_st_context.common.bd_buff_size =
4512                                                 bp->rx_buf_size;
4513                 context->ustorm_st_context.common.bd_page_base_hi =
4514                                                 U64_HI(fp->rx_desc_mapping);
4515                 context->ustorm_st_context.common.bd_page_base_lo =
4516                                                 U64_LO(fp->rx_desc_mapping);
4517                 if (!fp->disable_tpa) {
4518                         context->ustorm_st_context.common.flags |=
4519                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4520                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4521                         context->ustorm_st_context.common.sge_buff_size =
4522                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4523                         context->ustorm_st_context.common.sge_page_base_hi =
4524                                                 U64_HI(fp->rx_sge_mapping);
4525                         context->ustorm_st_context.common.sge_page_base_lo =
4526                                                 U64_LO(fp->rx_sge_mapping);
4527                 }
4528
4529                 context->cstorm_st_context.sb_index_number =
4530                                                 C_SB_ETH_TX_CQ_INDEX;
4531                 context->cstorm_st_context.status_block_id = sb_id;
4532
4533                 context->xstorm_ag_context.cdu_reserved =
4534                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4535                                                CDU_REGION_NUMBER_XCM_AG,
4536                                                ETH_CONNECTION_TYPE);
4537                 context->ustorm_ag_context.cdu_usage =
4538                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4539                                                CDU_REGION_NUMBER_UCM_AG,
4540                                                ETH_CONNECTION_TYPE);
4541         }
4542 }
4543
4544 static void bnx2x_init_ind_table(struct bnx2x *bp)
4545 {
4546         int func = BP_FUNC(bp);
4547         int i;
4548
4549         if (!is_multi(bp))
4550                 return;
4551
4552         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4553         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4554                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4555                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4556                         BP_CL_ID(bp) + (i % bp->num_queues));
4557 }
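
/* The RSS indirection table spreads hash buckets over the active
 * queues round-robin: entry i gets client id
 * BP_CL_ID(bp) + (i % num_queues).  E.g. with num_queues = 4 and a
 * base client id of 0, buckets 0..7 map to clients 0,1,2,3,0,1,2,3.
 */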
4558
4559 static void bnx2x_set_client_config(struct bnx2x *bp)
4560 {
4561         struct tstorm_eth_client_config tstorm_client = {0};
4562         int port = BP_PORT(bp);
4563         int i;
4564
4565         tstorm_client.mtu = bp->dev->mtu;
4566         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4567         tstorm_client.config_flags =
4568                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4569 #ifdef BCM_VLAN
4570         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4571                 tstorm_client.config_flags |=
4572                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4573                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4574         }
4575 #endif
4576
4577         if (bp->flags & TPA_ENABLE_FLAG) {
4578                 tstorm_client.max_sges_for_packet =
4579                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4580                 tstorm_client.max_sges_for_packet =
4581                         ((tstorm_client.max_sges_for_packet +
4582                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4583                         PAGES_PER_SGE_SHIFT;
4584
4585                 tstorm_client.config_flags |=
4586                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4587         }
4588
4589         for_each_queue(bp, i) {
4590                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4591                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4592                        ((u32 *)&tstorm_client)[0]);
4593                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4594                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4595                        ((u32 *)&tstorm_client)[1]);
4596         }
4597
4598         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4599            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4600 }
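
/* Worked example for max_sges_for_packet, with values assumed purely
 * for illustration (mtu = 9000, SGE_PAGE_SIZE = 4096,
 * PAGES_PER_SGE = 2, PAGES_PER_SGE_SHIFT = 1):
 *
 *      SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT   = 3 pages
 *      round up to PAGES_PER_SGE: (3 + 1) & ~1  = 4 pages
 *      4 >> PAGES_PER_SGE_SHIFT                 = 2 SGEs per packet
 */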
4601
4602 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4603 {
4604         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4605         int mode = bp->rx_mode;
4606         int mask = (1 << BP_L_ID(bp));
4607         int func = BP_FUNC(bp);
4608         int i;
4609
4610         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4611
4612         switch (mode) {
4613         case BNX2X_RX_MODE_NONE: /* no Rx */
4614                 tstorm_mac_filter.ucast_drop_all = mask;
4615                 tstorm_mac_filter.mcast_drop_all = mask;
4616                 tstorm_mac_filter.bcast_drop_all = mask;
4617                 break;
4618         case BNX2X_RX_MODE_NORMAL:
4619                 tstorm_mac_filter.bcast_accept_all = mask;
4620                 break;
4621         case BNX2X_RX_MODE_ALLMULTI:
4622                 tstorm_mac_filter.mcast_accept_all = mask;
4623                 tstorm_mac_filter.bcast_accept_all = mask;
4624                 break;
4625         case BNX2X_RX_MODE_PROMISC:
4626                 tstorm_mac_filter.ucast_accept_all = mask;
4627                 tstorm_mac_filter.mcast_accept_all = mask;
4628                 tstorm_mac_filter.bcast_accept_all = mask;
4629                 break;
4630         default:
4631                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4632                 break;
4633         }
4634
4635         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4636                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4637                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4638                        ((u32 *)&tstorm_mac_filter)[i]);
4639
4640 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4641                    ((u32 *)&tstorm_mac_filter)[i]); */
4642         }
4643
4644         if (mode != BNX2X_RX_MODE_NONE)
4645                 bnx2x_set_client_config(bp);
4646 }
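
/* Each drop_all/accept_all field in the mac filter config is a bit
 * mask of clients, and this function only ever sets its own bit,
 * mask = (1 << BP_L_ID(bp)).  E.g. in promiscuous mode the leading
 * client's bit is set in all three accept_all fields, while in
 * BNX2X_RX_MODE_NONE the same bit is set in the drop_all fields.
 */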
4647
4648 static void bnx2x_init_internal_common(struct bnx2x *bp)
4649 {
4650         int i;
4651
4652         if (bp->flags & TPA_ENABLE_FLAG) {
4653                 struct tstorm_eth_tpa_exist tpa = {0};
4654
4655                 tpa.tpa_exist = 1;
4656
4657                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4658                        ((u32 *)&tpa)[0]);
4659                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4660                        ((u32 *)&tpa)[1]);
4661         }
4662
4663         /* Zero this manually as its initialization is
4664            currently missing in the initTool */
4665         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4666                 REG_WR(bp, BAR_USTRORM_INTMEM +
4667                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4668 }
4669
4670 static void bnx2x_init_internal_port(struct bnx2x *bp)
4671 {
4672         int port = BP_PORT(bp);
4673
4674         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4675         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4676         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4677         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4678 }
4679
4680 static void bnx2x_init_internal_func(struct bnx2x *bp)
4681 {
4682         struct tstorm_eth_function_common_config tstorm_config = {0};
4683         struct stats_indication_flags stats_flags = {0};
4684         int port = BP_PORT(bp);
4685         int func = BP_FUNC(bp);
4686         int i;
4687         u16 max_agg_size;
4688
4689         if (is_multi(bp)) {
4690                 tstorm_config.config_flags = MULTI_FLAGS;
4691                 tstorm_config.rss_result_mask = MULTI_MASK;
4692         }
4693
4694         tstorm_config.leading_client_id = BP_L_ID(bp);
4695
4696         REG_WR(bp, BAR_TSTRORM_INTMEM +
4697                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4698                (*(u32 *)&tstorm_config));
4699
4700         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4701         bnx2x_set_storm_rx_mode(bp);
4702
4703         /* reset xstorm per client statistics */
4704         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4705                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4706                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4707                        i*4, 0);
4708         }
4709         /* reset tstorm per client statistics */
4710         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4711                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4712                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4713                        i*4, 0);
4714         }
4715
4716         /* Init statistics related context */
4717         stats_flags.collect_eth = 1;
4718
4719         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4720                ((u32 *)&stats_flags)[0]);
4721         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4722                ((u32 *)&stats_flags)[1]);
4723
4724         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4725                ((u32 *)&stats_flags)[0]);
4726         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4727                ((u32 *)&stats_flags)[1]);
4728
4729         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4730                ((u32 *)&stats_flags)[0]);
4731         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4732                ((u32 *)&stats_flags)[1]);
4733
4734         REG_WR(bp, BAR_XSTRORM_INTMEM +
4735                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4736                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4737         REG_WR(bp, BAR_XSTRORM_INTMEM +
4738                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4739                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4740
4741         REG_WR(bp, BAR_TSTRORM_INTMEM +
4742                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4743                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4744         REG_WR(bp, BAR_TSTRORM_INTMEM +
4745                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4746                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4747
4748         if (CHIP_IS_E1H(bp)) {
4749                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4750                         IS_E1HMF(bp));
4751                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4752                         IS_E1HMF(bp));
4753                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4754                         IS_E1HMF(bp));
4755                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4756                         IS_E1HMF(bp));
4757
4758                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4759                          bp->e1hov);
4760         }
4761
4762         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4763         max_agg_size =
4764                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4765                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4766                     (u32)0xffff);
4767         for_each_queue(bp, i) {
4768                 struct bnx2x_fastpath *fp = &bp->fp[i];
4769
4770                 REG_WR(bp, BAR_USTRORM_INTMEM +
4771                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4772                        U64_LO(fp->rx_comp_mapping));
4773                 REG_WR(bp, BAR_USTRORM_INTMEM +
4774                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4775                        U64_HI(fp->rx_comp_mapping));
4776
4777                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4778                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4779                          max_agg_size);
4780         }
4781 }
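
/* Worked example for max_agg_size, again with illustrative values
 * (SGE_PAGE_SIZE = 4096, PAGES_PER_SGE = 2): the 8-frag FW limit
 * gives min(8, MAX_SKB_FRAGS) * 4096 * 2 = 65536, which the outer
 * min() clips to 0xffff so the value fits the u16 STORM field.
 */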
4782
4783 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4784 {
4785         switch (load_code) {
4786         case FW_MSG_CODE_DRV_LOAD_COMMON:
4787                 bnx2x_init_internal_common(bp);
4788                 /* no break */
4789
4790         case FW_MSG_CODE_DRV_LOAD_PORT:
4791                 bnx2x_init_internal_port(bp);
4792                 /* no break */
4793
4794         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4795                 bnx2x_init_internal_func(bp);
4796                 break;
4797
4798         default:
4799                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4800                 break;
4801         }
4802 }
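
/* The missing breaks above are deliberate: the MCP load codes form a
 * strict hierarchy, so each response runs its own init plus everything
 * below it:
 *
 *      DRV_LOAD_COMMON   -> common + port + function init
 *      DRV_LOAD_PORT     -> port + function init
 *      DRV_LOAD_FUNCTION -> function init only
 */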
4803
4804 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4805 {
4806         int i;
4807
4808         for_each_queue(bp, i) {
4809                 struct bnx2x_fastpath *fp = &bp->fp[i];
4810
4811                 fp->bp = bp;
4812                 fp->state = BNX2X_FP_STATE_CLOSED;
4813                 fp->index = i;
4814                 fp->cl_id = BP_L_ID(bp) + i;
4815                 fp->sb_id = fp->cl_id;
4816                 DP(NETIF_MSG_IFUP,
4817                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4818                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4819                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4820                               FP_SB_ID(fp));
4821                 bnx2x_update_fpsb_idx(fp);
4822         }
4823
4824         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4825                           DEF_SB_ID);
4826         bnx2x_update_dsb_idx(bp);
4827         bnx2x_update_coalesce(bp);
4828         bnx2x_init_rx_rings(bp);
4829         bnx2x_init_tx_ring(bp);
4830         bnx2x_init_sp_ring(bp);
4831         bnx2x_init_context(bp);
4832         bnx2x_init_internal(bp, load_code);
4833         bnx2x_init_ind_table(bp);
4834         bnx2x_stats_init(bp);
4835
4836         /* At this point, we are ready for interrupts */
4837         atomic_set(&bp->intr_sem, 0);
4838
4839         /* flush all before enabling interrupts */
4840         mb();
4841         mmiowb();
4842
4843         bnx2x_int_enable(bp);
4844 }
4845
4846 /* end of nic init */
4847
4848 /*
4849  * gzip service functions
4850  */
4851
4852 static int bnx2x_gunzip_init(struct bnx2x *bp)
4853 {
4854         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4855                                               &bp->gunzip_mapping);
4856         if (bp->gunzip_buf == NULL)
4857                 goto gunzip_nomem1;
4858
4859         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4860         if (bp->strm == NULL)
4861                 goto gunzip_nomem2;
4862
4863         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4864                                       GFP_KERNEL);
4865         if (bp->strm->workspace == NULL)
4866                 goto gunzip_nomem3;
4867
4868         return 0;
4869
4870 gunzip_nomem3:
4871         kfree(bp->strm);
4872         bp->strm = NULL;
4873
4874 gunzip_nomem2:
4875         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4876                             bp->gunzip_mapping);
4877         bp->gunzip_buf = NULL;
4878
4879 gunzip_nomem1:
4880         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4881                " decompression\n", bp->dev->name);
4882         return -ENOMEM;
4883 }
4884
4885 static void bnx2x_gunzip_end(struct bnx2x *bp)
4886 {
4887         kfree(bp->strm->workspace);
4888
4889         kfree(bp->strm);
4890         bp->strm = NULL;
4891
4892         if (bp->gunzip_buf) {
4893                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4894                                     bp->gunzip_mapping);
4895                 bp->gunzip_buf = NULL;
4896         }
4897 }
4898
4899 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4900 {
4901         int n, rc;
4902
4903         /* check gzip header */
4904         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4905                 return -EINVAL;
4906
4907         n = 10;
4908
4909 #define FNAME                           0x8
4910
4911         if (zbuf[3] & FNAME)
4912                 while ((zbuf[n++] != 0) && (n < len));
4913
4914         bp->strm->next_in = zbuf + n;
4915         bp->strm->avail_in = len - n;
4916         bp->strm->next_out = bp->gunzip_buf;
4917         bp->strm->avail_out = FW_BUF_SIZE;
4918
4919         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4920         if (rc != Z_OK)
4921                 return rc;
4922
4923         rc = zlib_inflate(bp->strm, Z_FINISH);
4924         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4925                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4926                        bp->dev->name, bp->strm->msg);
4927
4928         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4929         if (bp->gunzip_outlen & 0x3)
4930                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4931                                     " gunzip_outlen (%d) not aligned\n",
4932                        bp->dev->name, bp->gunzip_outlen);
4933         bp->gunzip_outlen >>= 2;
4934
4935         zlib_inflateEnd(bp->strm);
4936
4937         if (rc == Z_STREAM_END)
4938                 return 0;
4939
4940         return rc;
4941 }
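
/* bnx2x_gunzip() strips the gzip wrapper by hand - the 0x1f 0x8b
 * magic and deflate method byte are checked, the fixed 10-byte header
 * is skipped, plus the NUL-terminated file name when FNAME is set -
 * and feeds the raw deflate stream to zlib; the negative window-bits
 * argument is what selects raw (headerless) mode.  The core pattern:
 *
 *      zlib_inflateInit2(strm, -MAX_WBITS);    raw deflate
 *      rc = zlib_inflate(strm, Z_FINISH);      single-shot inflate
 *      zlib_inflateEnd(strm);          rc == Z_STREAM_END on success
 */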
4942
4943 /* nic load/unload */
4944
4945 /*
4946  * General service functions
4947  */
4948
4949 /* send a NIG loopback debug packet */
4950 static void bnx2x_lb_pckt(struct bnx2x *bp)
4951 {
4952         u32 wb_write[3];
4953
4954         /* Ethernet source and destination addresses */
4955         wb_write[0] = 0x55555555;
4956         wb_write[1] = 0x55555555;
4957         wb_write[2] = 0x20;             /* SOP */
4958         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4959
4960         /* NON-IP protocol */
4961         wb_write[0] = 0x09000000;
4962         wb_write[1] = 0x55555555;
4963         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4964         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4965 }
4966
4967 /* Some of the internal memories are not directly readable from the
4968  * driver; to test them we send debug packets through the chip and
4969  * check the NIG/PRS packet counters along the path.
4970  */
4971 static int bnx2x_int_mem_test(struct bnx2x *bp)
4972 {
4973         int factor;
4974         int count, i;
4975         u32 val = 0;
4976
4977         if (CHIP_REV_IS_FPGA(bp))
4978                 factor = 120;
4979         else if (CHIP_REV_IS_EMUL(bp))
4980                 factor = 200;
4981         else
4982                 factor = 1;
4983
4984         DP(NETIF_MSG_HW, "start part1\n");
4985
4986         /* Disable inputs of parser neighbor blocks */
4987         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4988         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4989         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4990         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4991
4992         /*  Write 0 to parser credits for CFC search request */
4993         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4994
4995         /* send Ethernet packet */
4996         bnx2x_lb_pckt(bp);
4997
4998         /* TODO: should the NIG statistics be reset here? */
4999         /* Wait until NIG register shows 1 packet of size 0x10 */
5000         count = 1000 * factor;
5001         while (count) {
5002
5003                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5004                 val = *bnx2x_sp(bp, wb_data[0]);
5005                 if (val == 0x10)
5006                         break;
5007
5008                 msleep(10);
5009                 count--;
5010         }
5011         if (val != 0x10) {
5012                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5013                 return -1;
5014         }
5015
5016         /* Wait until PRS register shows 1 packet */
5017         count = 1000 * factor;
5018         while (count) {
5019                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020                 if (val == 1)
5021                         break;
5022
5023                 msleep(10);
5024                 count--;
5025         }
5026         if (val != 0x1) {
5027                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5028                 return -2;
5029         }
5030
5031         /* Reset and init BRB, PRS */
5032         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5033         msleep(50);
5034         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5035         msleep(50);
5036         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5037         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038
5039         DP(NETIF_MSG_HW, "part2\n");
5040
5041         /* Disable inputs of parser neighbor blocks */
5042         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5043         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5044         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5045         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5046
5047         /* Write 0 to parser credits for CFC search request */
5048         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5049
5050         /* send 10 Ethernet packets */
5051         for (i = 0; i < 10; i++)
5052                 bnx2x_lb_pckt(bp);
5053
5054         /* Wait until NIG register shows 10 + 1 packets
5055            of size 0x10 each (11 * 0x10 = 0xb0 total) */
5056         count = 1000 * factor;
5057         while (count) {
5058
5059                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5060                 val = *bnx2x_sp(bp, wb_data[0]);
5061                 if (val == 0xb0)
5062                         break;
5063
5064                 msleep(10);
5065                 count--;
5066         }
5067         if (val != 0xb0) {
5068                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5069                 return -3;
5070         }
5071
5072         /* Wait until PRS register shows 2 packets */
5073         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5074         if (val != 2)
5075                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5076
5077         /* Write 1 to parser credits for CFC search request */
5078         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5079
5080         /* Wait until PRS register shows 3 packets */
5081         msleep(10 * factor);
5082         /* check the PRS packet counter */
5083         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5084         if (val != 3)
5085                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5086
5087         /* clear NIG EOP FIFO */
5088         for (i = 0; i < 11; i++)
5089                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5090         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5091         if (val != 1) {
5092                 BNX2X_ERR("clear of NIG failed\n");
5093                 return -4;
5094         }
5095
5096         /* Reset and init BRB, PRS, NIG */
5097         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5098         msleep(50);
5099         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5100         msleep(50);
5101         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5102         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5103 #ifndef BCM_ISCSI
5104         /* set NIC mode */
5105         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5106 #endif
5107
5108         /* Enable inputs of parser neighbor blocks */
5109         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5110         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5111         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5112         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5113
5114         DP(NETIF_MSG_HW, "done\n");
5115
5116         return 0; /* OK */
5117 }
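
/* The magic byte counts in the test come from the NIG statistics
 * block: each loopback debug packet is accounted as 0x10 bytes, so
 * the first pass waits for 0x10 and the second pass, after 10 + 1
 * packets, waits for 11 * 0x10 = 0xb0 before going on.
 */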
5118
5119 static void enable_blocks_attention(struct bnx2x *bp)
5120 {
5121         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5122         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5123         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5124         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5125         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5126         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5127         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5128         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5129         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5130 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5131 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5132         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5133         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5134         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5135 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5136 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5137         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5138         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5139         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5140         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5141 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5142 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5143         if (CHIP_REV_IS_FPGA(bp))
5144                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5145         else
5146                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5147         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5148         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5149         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5150 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5151 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5152         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5153         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5154 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5155         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5156 }
5157
5158
5159 static void bnx2x_reset_common(struct bnx2x *bp)
5160 {
5161         /* reset_common */
5162         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5163                0xd3ffff7f);
5164         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5165 }
5166
5167 static int bnx2x_init_common(struct bnx2x *bp)
5168 {
5169         u32 val, i;
5170
5171         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5172
5173         bnx2x_reset_common(bp);
5174         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5175         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5176
5177         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5178         if (CHIP_IS_E1H(bp))
5179                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5180
5181         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5182         msleep(30);
5183         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5184
5185         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5186         if (CHIP_IS_E1(bp)) {
5187                 /* enable HW interrupt from PXP on USDM overflow
5188                    bit 16 on INT_MASK_0 */
5189                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5190         }
5191
5192         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5193         bnx2x_init_pxp(bp);
5194
5195 #ifdef __BIG_ENDIAN
5196         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5197         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5198         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5199         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5200         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5201
5202 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5203         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5204         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5205         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5206         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5207 #endif
5208
5209         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5210 #ifdef BCM_ISCSI
5211         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5212         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5213         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5214 #endif
5215
5216         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5217                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5218
5219         /* let the HW do its magic ... */
5220         msleep(100);
5221         /* finish PXP init */
5222         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5223         if (val != 1) {
5224                 BNX2X_ERR("PXP2 CFG failed\n");
5225                 return -EBUSY;
5226         }
5227         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5228         if (val != 1) {
5229                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5230                 return -EBUSY;
5231         }
5232
5233         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5234         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5235
5236         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5237
5238         /* clean the DMAE memory */
5239         bp->dmae_ready = 1;
5240         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5241
5242         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5243         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5244         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5245         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5246
5247         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5248         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5249         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5250         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5251
5252         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5253         /* soft reset pulse */
5254         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5255         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5256
5257 #ifdef BCM_ISCSI
5258         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5259 #endif
5260
5261         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5262         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5263         if (!CHIP_REV_IS_SLOW(bp)) {
5264                 /* enable hw interrupt from doorbell Q */
5265                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5266         }
5267
5268         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5269         if (CHIP_REV_IS_SLOW(bp)) {
5270                 /* fix for emulation and FPGA: disable pause */
5271                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5272                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5273                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5274                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5275         }
5276
5277         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5278         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5279         /* set NIC mode */
5280         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5281         if (CHIP_IS_E1H(bp))
5282                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5283
5284         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5285         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5286         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5287         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5288
5289         if (CHIP_IS_E1H(bp)) {
5290                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5291                                 STORM_INTMEM_SIZE_E1H/2);
5292                 bnx2x_init_fill(bp,
5293                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5294                                 0, STORM_INTMEM_SIZE_E1H/2);
5295                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5296                                 STORM_INTMEM_SIZE_E1H/2);
5297                 bnx2x_init_fill(bp,
5298                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5299                                 0, STORM_INTMEM_SIZE_E1H/2);
5300                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5301                                 STORM_INTMEM_SIZE_E1H/2);
5302                 bnx2x_init_fill(bp,
5303                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5304                                 0, STORM_INTMEM_SIZE_E1H/2);
5305                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5306                                 STORM_INTMEM_SIZE_E1H/2);
5307                 bnx2x_init_fill(bp,
5308                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5309                                 0, STORM_INTMEM_SIZE_E1H/2);
5310         } else { /* E1 */
5311                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5312                                 STORM_INTMEM_SIZE_E1);
5313                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5314                                 STORM_INTMEM_SIZE_E1);
5315                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5316                                 STORM_INTMEM_SIZE_E1);
5317                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5318                                 STORM_INTMEM_SIZE_E1);
5319         }
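        /* zero the storms' internal (scratch) memories; the E1H parts have
         * a larger area, cleared here in two halves, while E1 is cleared
         * in a single pass per storm
         */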
5320
5321         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5322         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5323         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5324         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5325
5326         /* sync semi rtc */
5327         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5328                0x80000000);
5329         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5330                0x80000000);
5331
5332         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5333         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5334         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5335
5336         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5337         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5338                 REG_WR(bp, i, 0xc0cac01a);
5339                 /* TODO: replace with something meaningful */
5340         }
5341         if (CHIP_IS_E1H(bp))
5342                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5343         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5344
5345         if (sizeof(union cdu_context) != 1024)
5346                 /* we currently assume that a context is 1024 bytes */
5347                 printk(KERN_ALERT PFX "please adjust the size of"
5348                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5349
5350         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
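        /* an assumption on the CDU_GLOBAL_PARAMS encoding: the low bits
         * carry the context size in bytes (1024, matching the cdu_context
         * check above); the upper fields (4 << 24, 0 << 12) come from the
         * init spec and are not decoded here
         */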
5351         val = (4 << 24) + (0 << 12) + 1024;
5352         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5353         if (CHIP_IS_E1(bp)) {
5354                 /* !!! fix pxp client credit until excel update */
5355                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5356                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5357         }
5358
5359         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5360         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5361
5362         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5363         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5364
5365         /* PXPCS COMMON comes here */
5366         /* Reset PCIE errors for debug */
5367         REG_WR(bp, 0x2814, 0xffffffff);
5368         REG_WR(bp, 0x3820, 0xffffffff);
5369
5370         /* EMAC0 COMMON comes here */
5371         /* EMAC1 COMMON comes here */
5372         /* DBU COMMON comes here */
5373         /* DBG COMMON comes here */
5374
5375         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5376         if (CHIP_IS_E1H(bp)) {
5377                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5378                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5379         }
5380
5381         if (CHIP_REV_IS_SLOW(bp))
5382                 msleep(200);
5383
5384         /* finish CFC init */
5385         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5386         if (val != 1) {
5387                 BNX2X_ERR("CFC LL_INIT failed\n");
5388                 return -EBUSY;
5389         }
5390         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5391         if (val != 1) {
5392                 BNX2X_ERR("CFC AC_INIT failed\n");
5393                 return -EBUSY;
5394         }
5395         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5396         if (val != 1) {
5397                 BNX2X_ERR("CFC CAM_INIT failed\n");
5398                 return -EBUSY;
5399         }
5400         REG_WR(bp, CFC_REG_DEBUG0, 0);
5401
5402         /* read NIG statistic
5403            to see if this is our first load since power-up */
5404         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5405         val = *bnx2x_sp(bp, wb_data[0]);
5406
5407         /* do internal memory self test */
5408         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5409                 BNX2X_ERR("internal mem self test failed\n");
5410                 return -EBUSY;
5411         }
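        /* the NIG octet counter apparently survives driver resets, so a
         * zero reading means no traffic has passed since power-up; the
         * (slow) internal memory self test runs only in that case, and
         * only on E1
         */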
5412
5413         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5414         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5415         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5416                 /* Fan failure is indicated by SPIO 5 */
5417                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5418                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5419
5420                 /* set to active low mode */
5421                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5422                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5423                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5424                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5425
5426                 /* enable interrupt to signal the IGU */
5427                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5428                 val |= (1 << MISC_REGISTERS_SPIO_5);
5429                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5430                 break;
5431
5432         default:
5433                 break;
5434         }
5435
5436         /* clear PXP2 attentions */
5437         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5438
5439         enable_blocks_attention(bp);
5440
5441         if (!BP_NOMCP(bp)) {
5442                 bnx2x_acquire_phy_lock(bp);
5443                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5444                 bnx2x_release_phy_lock(bp);
5445         } else
5446                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5447
5448         return 0;
5449 }
5450
5451 static int bnx2x_init_port(struct bnx2x *bp)
5452 {
5453         int port = BP_PORT(bp);
5454         u32 val;
5455
5456         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5457
5458         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5459
5460         /* Port PXP comes here */
5461         /* Port PXP2 comes here */
5462 #ifdef BCM_ISCSI
5463         /* Port0  1
5464          * Port1  385 */
5465         i++;
5466         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5467         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5468         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5469         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5470
5471         /* Port0  2
5472          * Port1  386 */
5473         i++;
5474         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5475         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5476         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5477         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5478
5479         /* Port0  3
5480          * Port1  387 */
5481         i++;
5482         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5483         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5484         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5485         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5486 #endif
5487         /* Port CMs come here */
5488
5489         /* Port QM comes here */
5490 #ifdef BCM_ISCSI
5491         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5492         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5493
5494         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5495                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5496 #endif
5497         /* Port DQ comes here */
5498         /* Port BRB1 comes here */
5499         /* Port PRS comes here */
5500         /* Port TSDM comes here */
5501         /* Port CSDM comes here */
5502         /* Port USDM comes here */
5503         /* Port XSDM comes here */
5504         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5505                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5506         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5507                              port ? USEM_PORT1_END : USEM_PORT0_END);
5508         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5509                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5510         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5511                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5512         /* Port UPB comes here */
5513         /* Port XPB comes here */
5514
5515         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5516                              port ? PBF_PORT1_END : PBF_PORT0_END);
5517
5518         /* configure PBF to work without PAUSE, MTU 9000 */
5519         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5520
5521         /* update threshold */
5522         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5523         /* update init credit */
5524         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
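        /* the PBF threshold/credit values above are in 16-byte units:
         * 9040/16 covers a 9000-byte MTU frame plus overhead; the
         * "+ 553 - 22" adjustment is presumably the internal buffer size
         * minus a reserved margin, carried over from the init values
         */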
5525
5526         /* probe changes */
5527         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5528         msleep(5);
5529         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5530
5531 #ifdef BCM_ISCSI
5532         /* tell the searcher where the T2 table is */
5533         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5534
5535         wb_write[0] = U64_LO(bp->t2_mapping);
5536         wb_write[1] = U64_HI(bp->t2_mapping);
5537         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5538         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5539         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5540         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5541
5542         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5543         /* Port SRCH comes here */
5544 #endif
5545         /* Port CDU comes here */
5546         /* Port CFC comes here */
5547
5548         if (CHIP_IS_E1(bp)) {
5549                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5550                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5551         }
5552         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5553                              port ? HC_PORT1_END : HC_PORT0_END);
5554
5555         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5556                                     MISC_AEU_PORT0_START,
5557                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5558         /* init aeu_mask_attn_func_0/1:
5559          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5560          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5561          *             bits 4-7 are used for "per vn group attention" */
5562         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5563                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5564
5565         /* Port PXPCS comes here */
5566         /* Port EMAC0 comes here */
5567         /* Port EMAC1 comes here */
5568         /* Port DBU comes here */
5569         /* Port DBG comes here */
5570         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5571                              port ? NIG_PORT1_END : NIG_PORT0_END);
5572
5573         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5574
5575         if (CHIP_IS_E1H(bp)) {
5576                 u32 wsum;
5577                 struct cmng_struct_per_port m_cmng_port;
5578                 int vn;
5579
5580                 /* 0x2 disable e1hov, 0x1 enable */
5581                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5582                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5583
5584                 /* Init RATE SHAPING and FAIRNESS contexts.
5585                    Initialize as if there is a 10G link. */
5586                 wsum = bnx2x_calc_vn_wsum(bp);
5587                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5588                 if (IS_E1HMF(bp))
5589                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5590                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5591                                         wsum, 10000, &m_cmng_port);
5592         }
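        /* the per-VN weights programmed above assume a 10G link; they are
         * presumably re-derived from the actual link speed once the link
         * comes up
         */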
5593
5594         /* Port MCP comes here */
5595         /* Port DMAE comes here */
5596
5597         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5598         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5599         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5600                 /* add SPIO 5 to group 0 */
5601                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5602                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5603                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5604                 break;
5605
5606         default:
5607                 break;
5608         }
5609
5610         bnx2x__link_reset(bp);
5611
5612         return 0;
5613 }
5614
5615 #define ILT_PER_FUNC            (768/2)
5616 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5617 /* the phys address is shifted right 12 bits and a valid bit (1)
5618    is set at the 53rd bit;
5619    then, since this is a wide register(TM),
5620    we split it into two 32-bit writes
5621  */
5622 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5623 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5624 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5625 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
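/* e.g. (illustration only): for a physical address 0x12_3456_7000,
 * ONCHIP_ADDR1() yields 0x01234567 (bits 12-43) and ONCHIP_ADDR2()
 * yields 0x00100000 (the valid bit alone, since bits 44+ are zero);
 * PXP_ONE_ILT() packs the same ILT line as both first and last of a
 * one-line range.
 */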
5626
5627 #define CNIC_ILT_LINES          0
5628
5629 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5630 {
5631         int reg;
5632
5633         if (CHIP_IS_E1H(bp))
5634                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5635         else /* E1 */
5636                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5637
5638         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5639 }
5640
5641 static int bnx2x_init_func(struct bnx2x *bp)
5642 {
5643         int port = BP_PORT(bp);
5644         int func = BP_FUNC(bp);
5645         int i;
5646
5647         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5648
5649         i = FUNC_ILT_BASE(func);
5650
5651         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5652         if (CHIP_IS_E1H(bp)) {
5653                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5654                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5655         } else /* E1 */
5656                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5657                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
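        /* each function owns ILT_PER_FUNC (384) ILT lines; line 0 of the
         * function's range maps the slowpath context page, and with
         * CNIC_ILT_LINES == 0 the CDU range covers just that single line
         */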
5658
5659
5660         if (CHIP_IS_E1H(bp)) {
5661                 for (i = 0; i < 9; i++)
5662                         bnx2x_init_block(bp,
5663                                          cm_start[func][i], cm_end[func][i]);
5664
5665                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5666                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5667         }
5668
5669         /* HC init per function */
5670         if (CHIP_IS_E1H(bp)) {
5671                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5672
5673                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5674                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5675         }
5676         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5677
5678         if (CHIP_IS_E1H(bp))
5679                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5680
5681         /* Reset PCIE errors for debug */
5682         REG_WR(bp, 0x2114, 0xffffffff);
5683         REG_WR(bp, 0x2120, 0xffffffff);
5684
5685         return 0;
5686 }
5687
5688 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5689 {
5690         int i, rc = 0;
5691
5692         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5693            BP_FUNC(bp), load_code);
5694
5695         bp->dmae_ready = 0;
5696         mutex_init(&bp->dmae_mutex);
5697         bnx2x_gunzip_init(bp);
5698
5699         switch (load_code) {
5700         case FW_MSG_CODE_DRV_LOAD_COMMON:
5701                 rc = bnx2x_init_common(bp);
5702                 if (rc)
5703                         goto init_hw_err;
5704                 /* no break */
5705
5706         case FW_MSG_CODE_DRV_LOAD_PORT:
5707                 bp->dmae_ready = 1;
5708                 rc = bnx2x_init_port(bp);
5709                 if (rc)
5710                         goto init_hw_err;
5711                 /* no break */
5712
5713         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5714                 bp->dmae_ready = 1;
5715                 rc = bnx2x_init_func(bp);
5716                 if (rc)
5717                         goto init_hw_err;
5718                 break;
5719
5720         default:
5721                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5722                 break;
5723         }
5724
5725         if (!BP_NOMCP(bp)) {
5726                 int func = BP_FUNC(bp);
5727
5728                 bp->fw_drv_pulse_wr_seq =
5729                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5730                                  DRV_PULSE_SEQ_MASK);
5731                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5732                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5733                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5734         } else
5735                 bp->func_stx = 0;
5736
5737         /* this needs to be done before gunzip end */
5738         bnx2x_zero_def_sb(bp);
5739         for_each_queue(bp, i)
5740                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5741
5742 init_hw_err:
5743         bnx2x_gunzip_end(bp);
5744
5745         return rc;
5746 }
5747
5748 /* send the MCP a request, block until there is a reply */
5749 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5750 {
5751         int func = BP_FUNC(bp);
5752         u32 seq = ++bp->fw_seq;
5753         u32 rc = 0;
5754         u32 cnt = 1;
5755         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5756
5757         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5758         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
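        /* the sequence number embedded in the command lets us tell a reply
         * to this request apart from a stale fw_mb_header left over from a
         * previous command
         */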
5759
5760         do {
5761                 /* let the FW do its magic ... */
5762                 msleep(delay);
5763
5764                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5765
5766                 /* Give the FW up to 2 seconds (200 * 10ms); 20 seconds on an emulation (slow) chip */
5767         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5768
5769         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5770            cnt*delay, rc, seq);
5771
5772         /* is this a reply to our command? */
5773         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5774                 rc &= FW_MSG_CODE_MASK;
5775
5776         } else {
5777                 /* FW BUG! */
5778                 BNX2X_ERR("FW failed to respond!\n");
5779                 bnx2x_fw_dump(bp);
5780                 rc = 0;
5781         }
5782
5783         return rc;
5784 }
5785
5786 static void bnx2x_free_mem(struct bnx2x *bp)
5787 {
5788
5789 #define BNX2X_PCI_FREE(x, y, size) \
5790         do { \
5791                 if (x) { \
5792                         pci_free_consistent(bp->pdev, size, x, y); \
5793                         x = NULL; \
5794                         y = 0; \
5795                 } \
5796         } while (0)
5797
5798 #define BNX2X_FREE(x) \
5799         do { \
5800                 if (x) { \
5801                         vfree(x); \
5802                         x = NULL; \
5803                 } \
5804         } while (0)
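/* BNX2X_PCI_FREE releases DMA-coherent blocks from pci_alloc_consistent(),
 * BNX2X_FREE releases vmalloc()ed host-only arrays; both NULL the pointer,
 * so calling bnx2x_free_mem() on a partially allocated device is safe
 */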
5805
5806         int i;
5807
5808         /* fastpath */
5809         for_each_queue(bp, i) {
5810
5811                 /* Status blocks */
5812                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5813                                bnx2x_fp(bp, i, status_blk_mapping),
5814                                sizeof(struct host_status_block) +
5815                                sizeof(struct eth_tx_db_data));
5816
5817                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5818                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5819                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5820                                bnx2x_fp(bp, i, tx_desc_mapping),
5821                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5822
5823                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5824                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5825                                bnx2x_fp(bp, i, rx_desc_mapping),
5826                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5827
5828                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5829                                bnx2x_fp(bp, i, rx_comp_mapping),
5830                                sizeof(struct eth_fast_path_rx_cqe) *
5831                                NUM_RCQ_BD);
5832
5833                 /* SGE ring */
5834                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5835                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5836                                bnx2x_fp(bp, i, rx_sge_mapping),
5837                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5838         }
5839         /* end of fastpath */
5840
5841         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5842                        sizeof(struct host_def_status_block));
5843
5844         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5845                        sizeof(struct bnx2x_slowpath));
5846
5847 #ifdef BCM_ISCSI
5848         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5849         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5850         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5851         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5852 #endif
5853         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5854
5855 #undef BNX2X_PCI_FREE
5856 #undef BNX2X_FREE
5857 }
5858
5859 static int bnx2x_alloc_mem(struct bnx2x *bp)
5860 {
5861
5862 #define BNX2X_PCI_ALLOC(x, y, size) \
5863         do { \
5864                 x = pci_alloc_consistent(bp->pdev, size, y); \
5865                 if (x == NULL) \
5866                         goto alloc_mem_err; \
5867                 memset(x, 0, size); \
5868         } while (0)
5869
5870 #define BNX2X_ALLOC(x, size) \
5871         do { \
5872                 x = vmalloc(size); \
5873                 if (x == NULL) \
5874                         goto alloc_mem_err; \
5875                 memset(x, 0, size); \
5876         } while (0)
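/* rings the chip DMAs to/from (descriptors, status blocks) come from
 * pci_alloc_consistent(); the software shadow rings (sw_tx_bd / sw_rx_bd
 * arrays) are only touched by the host, hence vmalloc()
 */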
5877
5878         int i;
5879
5880         /* fastpath */
5881         for_each_queue(bp, i) {
5882                 bnx2x_fp(bp, i, bp) = bp;
5883
5884                 /* Status blocks */
5885                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5886                                 &bnx2x_fp(bp, i, status_blk_mapping),
5887                                 sizeof(struct host_status_block) +
5888                                 sizeof(struct eth_tx_db_data));
5889
5890                 bnx2x_fp(bp, i, hw_tx_prods) =
5891                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5892
5893                 bnx2x_fp(bp, i, tx_prods_mapping) =
5894                                 bnx2x_fp(bp, i, status_blk_mapping) +
5895                                 sizeof(struct host_status_block);
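                /* the HW tx producers page lives in the same DMA block,
                 * right behind the status block (the allocation above is
                 * sized for both)
                 */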
5896
5897                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5898                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5899                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5900                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5901                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5902                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5903
5904                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5905                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5906                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5907                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5908                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5909
5910                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5911                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5912                                 sizeof(struct eth_fast_path_rx_cqe) *
5913                                 NUM_RCQ_BD);
5914
5915                 /* SGE ring */
5916                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5917                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5918                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5919                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5920                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5921         }
5922         /* end of fastpath */
5923
5924         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5925                         sizeof(struct host_def_status_block));
5926
5927         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5928                         sizeof(struct bnx2x_slowpath));
5929
5930 #ifdef BCM_ISCSI
5931         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5932
5933         /* Initialize T1 */
5934         for (i = 0; i < 64*1024; i += 64) {
5935                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5936                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5937         }
5938
5939         /* allocate searcher T2 table;
5940            we allocate 1/4 of the T1 size for T2
5941            (which is not entered into the ILT) */
5942         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5943
5944         /* Initialize T2 */
5945         for (i = 0; i < 16*1024; i += 64)
5946                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5947
5948         /* now fixup the last line in the block to point to the next block */
5949         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5950
5951         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5952         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5953
5954         /* QM queues (128*MAX_CONN) */
5955         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5956 #endif
5957
5958         /* Slow path ring */
5959         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5960
5961         return 0;
5962
5963 alloc_mem_err:
5964         bnx2x_free_mem(bp);
5965         return -ENOMEM;
5966
5967 #undef BNX2X_PCI_ALLOC
5968 #undef BNX2X_ALLOC
5969 }
5970
5971 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5972 {
5973         int i;
5974
5975         for_each_queue(bp, i) {
5976                 struct bnx2x_fastpath *fp = &bp->fp[i];
5977
5978                 u16 bd_cons = fp->tx_bd_cons;
5979                 u16 sw_prod = fp->tx_pkt_prod;
5980                 u16 sw_cons = fp->tx_pkt_cons;
5981
5982                 while (sw_cons != sw_prod) {
5983                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5984                         sw_cons++;
5985                 }
5986         }
5987 }
5988
5989 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5990 {
5991         int i, j;
5992
5993         for_each_queue(bp, j) {
5994                 struct bnx2x_fastpath *fp = &bp->fp[j];
5995
5996                 for (i = 0; i < NUM_RX_BD; i++) {
5997                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5998                         struct sk_buff *skb = rx_buf->skb;
5999
6000                         if (skb == NULL)
6001                                 continue;
6002
6003                         pci_unmap_single(bp->pdev,
6004                                          pci_unmap_addr(rx_buf, mapping),
6005                                          bp->rx_buf_size,
6006                                          PCI_DMA_FROMDEVICE);
6007
6008                         rx_buf->skb = NULL;
6009                         dev_kfree_skb(skb);
6010                 }
6011                 if (!fp->disable_tpa)
6012                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6013                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6014                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6015         }
6016 }
6017
6018 static void bnx2x_free_skbs(struct bnx2x *bp)
6019 {
6020         bnx2x_free_tx_skbs(bp);
6021         bnx2x_free_rx_skbs(bp);
6022 }
6023
6024 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6025 {
6026         int i, offset = 1;
6027
6028         free_irq(bp->msix_table[0].vector, bp->dev);
6029         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6030            bp->msix_table[0].vector);
6031
6032         for_each_queue(bp, i) {
6033                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6034                    "state %x\n", i, bp->msix_table[i + offset].vector,
6035                    bnx2x_fp(bp, i, state));
6036
6037                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6038                         BNX2X_ERR("IRQ of fp #%d being freed while "
6039                                   "state != closed\n", i);
6040
6041                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6042         }
6043 }
6044
6045 static void bnx2x_free_irq(struct bnx2x *bp)
6046 {
6047         if (bp->flags & USING_MSIX_FLAG) {
6048                 bnx2x_free_msix_irqs(bp);
6049                 pci_disable_msix(bp->pdev);
6050                 bp->flags &= ~USING_MSIX_FLAG;
6051
6052         } else
6053                 free_irq(bp->pdev->irq, bp->dev);
6054 }
6055
6056 static int bnx2x_enable_msix(struct bnx2x *bp)
6057 {
6058         int i, rc, offset;
6059
6060         bp->msix_table[0].entry = 0;
6061         offset = 1;
6062         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6063
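        /* entry 0 is the slowpath/default status block vector; each
         * fastpath queue then gets IGU vector (offset + i + BP_L_ID(bp)),
         * where BP_L_ID() presumably offsets the function's base IGU index
         * so the two functions' vectors do not collide
         */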
6064         for_each_queue(bp, i) {
6065                 int igu_vec = offset + i + BP_L_ID(bp);
6066
6067                 bp->msix_table[i + offset].entry = igu_vec;
6068                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6069                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6070         }
6071
6072         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6073                              bp->num_queues + offset);
6074         if (rc) {
6075                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6076                 return -1;
6077         }
6078         bp->flags |= USING_MSIX_FLAG;
6079
6080         return 0;
6081 }
6082
6083 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6084 {
6085         int i, rc, offset = 1;
6086
6087         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6088                          bp->dev->name, bp->dev);
6089         if (rc) {
6090                 BNX2X_ERR("request sp irq failed\n");
6091                 return -EBUSY;
6092         }
6093
6094         for_each_queue(bp, i) {
6095                 rc = request_irq(bp->msix_table[i + offset].vector,
6096                                  bnx2x_msix_fp_int, 0,
6097                                  bp->dev->name, &bp->fp[i]);
6098                 if (rc) {
6099                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6100                                   i + offset, -rc);
6101                         bnx2x_free_msix_irqs(bp);
6102                         return -EBUSY;
6103                 }
6104
6105                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6106         }
6107
6108         return 0;
6109 }
6110
6111 static int bnx2x_req_irq(struct bnx2x *bp)
6112 {
6113         int rc;
6114
6115         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6116                          bp->dev->name, bp->dev);
6117         if (!rc)
6118                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6119
6120         return rc;
6121 }
6122
6123 static void bnx2x_napi_enable(struct bnx2x *bp)
6124 {
6125         int i;
6126
6127         for_each_queue(bp, i)
6128                 napi_enable(&bnx2x_fp(bp, i, napi));
6129 }
6130
6131 static void bnx2x_napi_disable(struct bnx2x *bp)
6132 {
6133         int i;
6134
6135         for_each_queue(bp, i)
6136                 napi_disable(&bnx2x_fp(bp, i, napi));
6137 }
6138
6139 static void bnx2x_netif_start(struct bnx2x *bp)
6140 {
6141         if (atomic_dec_and_test(&bp->intr_sem)) {
6142                 if (netif_running(bp->dev)) {
6143                         if (bp->state == BNX2X_STATE_OPEN)
6144                                 netif_wake_queue(bp->dev);
6145                         bnx2x_napi_enable(bp);
6146                         bnx2x_int_enable(bp);
6147                 }
6148         }
6149 }
6150
6151 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6152 {
6153         bnx2x_int_disable_sync(bp, disable_hw);
6154         bnx2x_napi_disable(bp);
6155         if (netif_running(bp->dev)) {
6156                 netif_tx_disable(bp->dev);
6157                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6158         }
6159 }
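/* bnx2x_netif_stop()/bnx2x_netif_start() nest: bnx2x_int_disable_sync()
 * presumably bumps bp->intr_sem, and the atomic_dec_and_test() above
 * re-enables NAPI and interrupts only when the outermost start is reached
 */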
6160
6161 /*
6162  * Init service functions
6163  */
6164
6165 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6166 {
6167         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6168         int port = BP_PORT(bp);
6169
6170         /* CAM allocation
6171          * unicasts 0-31:port0 32-63:port1
6172          * multicast 64-127:port0 128-191:port1
6173          */
6174         config->hdr.length_6b = 2;
6175         config->hdr.offset = port ? 32 : 0;
6176         config->hdr.client_id = BP_CL_ID(bp);
6177         config->hdr.reserved1 = 0;
6178
6179         /* primary MAC */
6180         config->config_table[0].cam_entry.msb_mac_addr =
6181                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6182         config->config_table[0].cam_entry.middle_mac_addr =
6183                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6184         config->config_table[0].cam_entry.lsb_mac_addr =
6185                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
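        /* each 16-bit word of the MAC is byte-swapped for the CAM: on a
         * little-endian host, MAC 00:11:22:33:44:55 (an illustration)
         * yields msb 0x0011, middle 0x2233, lsb 0x4455
         */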
6186         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6187         if (set)
6188                 config->config_table[0].target_table_entry.flags = 0;
6189         else
6190                 CAM_INVALIDATE(config->config_table[0]);
6191         config->config_table[0].target_table_entry.client_id = 0;
6192         config->config_table[0].target_table_entry.vlan_id = 0;
6193
6194         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6195            (set ? "setting" : "clearing"),
6196            config->config_table[0].cam_entry.msb_mac_addr,
6197            config->config_table[0].cam_entry.middle_mac_addr,
6198            config->config_table[0].cam_entry.lsb_mac_addr);
6199
6200         /* broadcast */
6201         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6202         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6203         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6204         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6205         if (set)
6206                 config->config_table[1].target_table_entry.flags =
6207                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6208         else
6209                 CAM_INVALIDATE(config->config_table[1]);
6210         config->config_table[1].target_table_entry.client_id = 0;
6211         config->config_table[1].target_table_entry.vlan_id = 0;
6212
6213         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6214                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6215                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6216 }
6217
6218 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6219 {
6220         struct mac_configuration_cmd_e1h *config =
6221                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6222
6223         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6224                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6225                 return;
6226         }
6227
6228         /* CAM allocation for E1H
6229          * unicasts: by func number
6230          * multicast: 20+FUNC*20, 20 each
6231          */
6232         config->hdr.length_6b = 1;
6233         config->hdr.offset = BP_FUNC(bp);
6234         config->hdr.client_id = BP_CL_ID(bp);
6235         config->hdr.reserved1 = 0;
6236
6237         /* primary MAC */
6238         config->config_table[0].msb_mac_addr =
6239                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6240         config->config_table[0].middle_mac_addr =
6241                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6242         config->config_table[0].lsb_mac_addr =
6243                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6244         config->config_table[0].client_id = BP_L_ID(bp);
6245         config->config_table[0].vlan_id = 0;
6246         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6247         if (set)
6248                 config->config_table[0].flags = BP_PORT(bp);
6249         else
6250                 config->config_table[0].flags =
6251                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6252
6253         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6254            (set ? "setting" : "clearing"),
6255            config->config_table[0].msb_mac_addr,
6256            config->config_table[0].middle_mac_addr,
6257            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6258
6259         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6260                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6261                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6262 }
6263
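/* Sleep (or poll the RX rings when interrupts are unusable) until *state_p,
 * which is updated from the ramrod completion in bnx2x_sp_event(), reaches
 * the requested state; gives up after ~500 iterations of msleep(1).
 */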
6264 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6265                              int *state_p, int poll)
6266 {
6267         /* can take a while if any port is running */
6268         int cnt = 500;
6269
6270         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6271            poll ? "polling" : "waiting", state, idx);
6272
6273         might_sleep();
6274         while (cnt--) {
6275                 if (poll) {
6276                         bnx2x_rx_int(bp->fp, 10);
6277                         /* if index is different from 0,
6278                          * the reply for some commands will
6279                          * be on the non-default queue
6280                          */
6281                         if (idx)
6282                                 bnx2x_rx_int(&bp->fp[idx], 10);
6283                 }
6284
6285                 mb(); /* state is changed by bnx2x_sp_event() */
6286                 if (*state_p == state)
6287                         return 0;
6288
6289                 msleep(1);
6290         }
6291
6292         /* timeout! */
6293         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6294                   poll ? "polling" : "waiting", state, idx);
6295 #ifdef BNX2X_STOP_ON_ERROR
6296         bnx2x_panic();
6297 #endif
6298
6299         return -EBUSY;
6300 }
6301
6302 static int bnx2x_setup_leading(struct bnx2x *bp)
6303 {
6304         int rc;
6305
6306         /* reset IGU state */
6307         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6308
6309         /* SETUP ramrod */
6310         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6311
6312         /* Wait for completion */
6313         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6314
6315         return rc;
6316 }
6317
6318 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6319 {
6320         /* reset IGU state */
6321         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6322
6323         /* SETUP ramrod */
6324         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6325         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6326
6327         /* Wait for completion */
6328         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6329                                  &(bp->fp[index].state), 0);
6330 }
6331
6332 static int bnx2x_poll(struct napi_struct *napi, int budget);
6333 static void bnx2x_set_rx_mode(struct net_device *dev);
6334
6335 /* must be called with rtnl_lock */
6336 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6337 {
6338         u32 load_code;
6339         int i, rc = 0;
6340 #ifdef BNX2X_STOP_ON_ERROR
6341         if (unlikely(bp->panic))
6342                 return -EPERM;
6343 #endif
6344
6345         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6346
6347         if (use_inta) {
6348                 bp->num_queues = 1;
6349
6350         } else {
6351                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6352                         /* user requested number */
6353                         bp->num_queues = use_multi;
6354
6355                 else if (use_multi)
6356                         bp->num_queues = min_t(u32, num_online_cpus(),
6357                                                BP_MAX_QUEUES(bp));
6358                 else
6359                         bp->num_queues = 1;
6360
6361                 DP(NETIF_MSG_IFUP,
6362                    "set number of queues to %d\n", bp->num_queues);
6363
6364                 /* if we can't use MSI-X we only need one fp,
6365                  * so try to enable MSI-X with the requested number of fp's
6366                  * and fall back to MSI or legacy INTx with one fp
6367                  */
6368                 rc = bnx2x_enable_msix(bp);
6369                 if (rc) {
6370                         /* failed to enable MSI-X */
6371                         bp->num_queues = 1;
6372                         if (use_multi)
6373                                 BNX2X_ERR("Multi requested but failed"
6374                                           " to enable MSI-X\n");
6375                 }
6376         }
6377
6378         if (bnx2x_alloc_mem(bp))
6379                 return -ENOMEM;
6380
6381         for_each_queue(bp, i)
6382                 bnx2x_fp(bp, i, disable_tpa) =
6383                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6384
6385         for_each_queue(bp, i)
6386                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6387                                bnx2x_poll, 128);
6388
6389 #ifdef BNX2X_STOP_ON_ERROR
6390         for_each_queue(bp, i) {
6391                 struct bnx2x_fastpath *fp = &bp->fp[i];
6392
6393                 fp->poll_no_work = 0;
6394                 fp->poll_calls = 0;
6395                 fp->poll_max_calls = 0;
6396                 fp->poll_complete = 0;
6397                 fp->poll_exit = 0;
6398         }
6399 #endif
6400         bnx2x_napi_enable(bp);
6401
6402         if (bp->flags & USING_MSIX_FLAG) {
6403                 rc = bnx2x_req_msix_irqs(bp);
6404                 if (rc) {
6405                         pci_disable_msix(bp->pdev);
6406                         goto load_error1;
6407                 }
6408                 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6409         } else {
6410                 bnx2x_ack_int(bp);
6411                 rc = bnx2x_req_irq(bp);
6412                 if (rc) {
6413                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6414                         goto load_error1;
6415                 }
6416         }
6417
6418         /* Send LOAD_REQUEST command to the MCP.
6419            The reply is the type of LOAD command:
6420            if this is the first port to be initialized,
6421            common blocks should be initialized as well; otherwise not
6422         */
6423         if (!BP_NOMCP(bp)) {
6424                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6425                 if (!load_code) {
6426                         BNX2X_ERR("MCP response failure, aborting\n");
6427                         rc = -EBUSY;
6428                         goto load_error2;
6429                 }
6430                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6431                         rc = -EBUSY; /* other port in diagnostic mode */
6432                         goto load_error2;
6433                 }
6434
6435         } else {
6436                 int port = BP_PORT(bp);
6437
6438                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6439                    load_count[0], load_count[1], load_count[2]);
6440                 load_count[0]++;
6441                 load_count[1 + port]++;
6442                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6443                    load_count[0], load_count[1], load_count[2]);
6444                 if (load_count[0] == 1)
6445                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6446                 else if (load_count[1 + port] == 1)
6447                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6448                 else
6449                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6450         }
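        /* without bootcode the driver emulates the MCP's load arbitration
         * with the static load_count[] array: the first load overall does
         * COMMON init, the first load on this port does PORT init, any
         * other load does FUNCTION init only
         */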
6451
6452         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6453             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6454                 bp->port.pmf = 1;
6455         else
6456                 bp->port.pmf = 0;
6457         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6458
6459         /* Initialize HW */
6460         rc = bnx2x_init_hw(bp, load_code);
6461         if (rc) {
6462                 BNX2X_ERR("HW init failed, aborting\n");
6463                 goto load_error2;
6464         }
6465
6466         /* Setup NIC internals and enable interrupts */
6467         bnx2x_nic_init(bp, load_code);
6468
6469         /* Send LOAD_DONE command to MCP */
6470         if (!BP_NOMCP(bp)) {
6471                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6472                 if (!load_code) {
6473                         BNX2X_ERR("MCP response failure, aborting\n");
6474                         rc = -EBUSY;
6475                         goto load_error3;
6476                 }
6477         }
6478
6479         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6480
6481         rc = bnx2x_setup_leading(bp);
6482         if (rc) {
6483                 BNX2X_ERR("Setup leading failed!\n");
6484                 goto load_error3;
6485         }
6486
6487         if (CHIP_IS_E1H(bp))
6488                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6489                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6490                         bp->state = BNX2X_STATE_DISABLED;
6491                 }
6492
6493         if (bp->state == BNX2X_STATE_OPEN)
6494                 for_each_nondefault_queue(bp, i) {
6495                         rc = bnx2x_setup_multi(bp, i);
6496                         if (rc)
6497                                 goto load_error3;
6498                 }
6499
6500         if (CHIP_IS_E1(bp))
6501                 bnx2x_set_mac_addr_e1(bp, 1);
6502         else
6503                 bnx2x_set_mac_addr_e1h(bp, 1);
6504
6505         if (bp->port.pmf)
6506                 bnx2x_initial_phy_init(bp);
6507
6508         /* Start fast path */
6509         switch (load_mode) {
6510         case LOAD_NORMAL:
6511                 /* Tx queue should only be re-enabled */
6512                 netif_wake_queue(bp->dev);
6513                 /* Initialize the receive filter. */
6514                 bnx2x_set_rx_mode(bp->dev);
6515                 break;
6516
6517         case LOAD_OPEN:
6518                 netif_start_queue(bp->dev);
6519                 /* Initialize the receive filter. */
6520                 bnx2x_set_rx_mode(bp->dev);
6521                 break;
6522
6523         case LOAD_DIAG:
6524                 /* Initialize the receive filter. */
6525                 bnx2x_set_rx_mode(bp->dev);
6526                 bp->state = BNX2X_STATE_DIAG;
6527                 break;
6528
6529         default:
6530                 break;
6531         }
6532
6533         if (!bp->port.pmf)
6534                 bnx2x__link_status_update(bp);
6535
6536         /* start the timer */
6537         mod_timer(&bp->timer, jiffies + bp->current_interval);
6538
6539
6540         return 0;
6541
6542 load_error3:
6543         bnx2x_int_disable_sync(bp, 1);
6544         if (!BP_NOMCP(bp)) {
6545                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6546                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6547         }
6548         bp->port.pmf = 0;
6549         /* Free SKBs, SGEs, TPA pool and driver internals */
6550         bnx2x_free_skbs(bp);
6551         for_each_queue(bp, i)
6552                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6553 load_error2:
6554         /* Release IRQs */
6555         bnx2x_free_irq(bp);
6556 load_error1:
6557         bnx2x_napi_disable(bp);
6558         for_each_queue(bp, i)
6559                 netif_napi_del(&bnx2x_fp(bp, i, napi));
6560         bnx2x_free_mem(bp);
6561
6562         /* TBD we really need to reset the chip
6563            if we want to recover from this */
6564         return rc;
6565 }
6566
6567 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6568 {
6569         int rc;
6570
6571         /* halt the connection */
6572         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6573         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6574
6575         /* Wait for completion */
6576         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6577                                &(bp->fp[index].state), 1);
6578         if (rc) /* timeout */
6579                 return rc;
6580
6581         /* delete cfc entry */
6582         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6583
6584         /* Wait for completion */
6585         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6586                                &(bp->fp[index].state), 1);
6587         return rc;
6588 }
6589
6590 static int bnx2x_stop_leading(struct bnx2x *bp)
6591 {
6592         u16 dsb_sp_prod_idx;
6593         /* if the other port is handling traffic,
6594            this can take a lot of time */
6595         int cnt = 500;
6596         int rc;
6597
6598         might_sleep();
6599
6600         /* Send HALT ramrod */
6601         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6602         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6603
6604         /* Wait for completion */
6605         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6606                                &(bp->fp[0].state), 1);
6607         if (rc) /* timeout */
6608                 return rc;
6609
6610         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6611
6612         /* Send PORT_DELETE ramrod */
6613         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6614
6615         /* Wait for the completion to arrive on the default status block;
6616            we are going to reset the chip anyway,
6617            so there is not much to do if this times out
6618          */
6619         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6620                 if (!cnt) {
6621                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6622                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6623                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6624 #ifdef BNX2X_STOP_ON_ERROR
6625                         bnx2x_panic();
6626 #else
6627                         rc = -EBUSY;
6628 #endif
6629                         break;
6630                 }
6631                 cnt--;
6632                 msleep(1);
6633                 rmb(); /* Refresh the dsb_sp_prod */
6634         }
6635         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6636         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6637
6638         return rc;
6639 }
6640
6641 static void bnx2x_reset_func(struct bnx2x *bp)
6642 {
6643         int port = BP_PORT(bp);
6644         int func = BP_FUNC(bp);
6645         int base, i;
6646
6647         /* Configure IGU */
6648         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6649         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6650
6651         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6652
6653         /* Clear ILT */
6654         base = FUNC_ILT_BASE(func);
6655         for (i = base; i < base + ILT_PER_FUNC; i++)
6656                 bnx2x_ilt_wr(bp, i, 0);
6657 }
6658
6659 static void bnx2x_reset_port(struct bnx2x *bp)
6660 {
6661         int port = BP_PORT(bp);
6662         u32 val;
6663
6664         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6665
6666         /* Do not rcv packets to BRB */
6667         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6668         /* Do not direct rcv packets that are not for MCP to the BRB */
6669         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6670                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6671
6672         /* Configure AEU */
6673         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6674
6675         msleep(100);
6676         /* Check for BRB port occupancy */
6677         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6678         if (val)
6679                 DP(NETIF_MSG_IFDOWN,
6680                    "BRB1 is not empty  %d blocks are occupied\n", val);
6681
6682         /* TODO: Close Doorbell port? */
6683 }
6684
6685 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6686 {
6687         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6688            BP_FUNC(bp), reset_code);
6689
6690         switch (reset_code) {
6691         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6692                 bnx2x_reset_port(bp);
6693                 bnx2x_reset_func(bp);
6694                 bnx2x_reset_common(bp);
6695                 break;
6696
6697         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6698                 bnx2x_reset_port(bp);
6699                 bnx2x_reset_func(bp);
6700                 break;
6701
6702         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6703                 bnx2x_reset_func(bp);
6704                 break;
6705
6706         default:
6707                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6708                 break;
6709         }
6710 }
6711
6712 /* must be called with rtnl_lock */
6713 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6714 {
6715         int port = BP_PORT(bp);
6716         u32 reset_code = 0;
6717         int i, cnt, rc;
6718
6719         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6720
6721         bp->rx_mode = BNX2X_RX_MODE_NONE;
6722         bnx2x_set_storm_rx_mode(bp);
6723
6724         bnx2x_netif_stop(bp, 1);
6725
6726         del_timer_sync(&bp->timer);
6727         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6728                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6729         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6730
6731         /* Release IRQs */
6732         bnx2x_free_irq(bp);
6733
6734         /* Wait until tx fast path tasks complete */
6735         for_each_queue(bp, i) {
6736                 struct bnx2x_fastpath *fp = &bp->fp[i];
6737
6738                 cnt = 1000;
6739                 smp_rmb();
6740                 while (bnx2x_has_tx_work_unload(fp)) {
6741
6742                         bnx2x_tx_int(fp, 1000);
6743                         if (!cnt) {
6744                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6745                                           i);
6746 #ifdef BNX2X_STOP_ON_ERROR
6747                                 bnx2x_panic();
6748                                 return -EBUSY;
6749 #else
6750                                 break;
6751 #endif
6752                         }
6753                         cnt--;
6754                         msleep(1);
6755                         smp_rmb();
6756                 }
6757         }
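        /* at this point each queue's TX ring has been drained (or we gave
         * up after ~1 second per queue)
         */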
6758         /* Give HW time to discard old tx messages */
6759         msleep(1);
6760
6761         if (CHIP_IS_E1(bp)) {
6762                 struct mac_configuration_cmd *config =
6763                                                 bnx2x_sp(bp, mcast_config);
6764
6765                 bnx2x_set_mac_addr_e1(bp, 0);
6766
6767                 for (i = 0; i < config->hdr.length_6b; i++)
6768                         CAM_INVALIDATE(config->config_table[i]);
6769
6770                 config->hdr.length_6b = i;
6771                 if (CHIP_REV_IS_SLOW(bp))
6772                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6773                 else
6774                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6775                 config->hdr.client_id = BP_CL_ID(bp);
6776                 config->hdr.reserved1 = 0;
6777
6778                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6779                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6780                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6781
6782         } else { /* E1H */
6783                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6784
6785                 bnx2x_set_mac_addr_e1h(bp, 0);
6786
6787                 for (i = 0; i < MC_HASH_SIZE; i++)
6788                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6789         }
6790
6791         if (unload_mode == UNLOAD_NORMAL)
6792                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6793
6794         else if (bp->flags & NO_WOL_FLAG) {
6795                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6796                 if (CHIP_IS_E1H(bp))
6797                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6798
6799         } else if (bp->wol) {
6800                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6801                 u8 *mac_addr = bp->dev->dev_addr;
6802                 u32 val;
6803                 /* The MAC address is written to entries 1-4 to
6804                    preserve entry 0, which is used by the PMF */
6805                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6806
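                     /* Each MAC_MATCH entry is 8 bytes: the low 16 bits
                      * of the first dword hold MAC bytes 0-1 and the
                      * second dword holds bytes 2-5.  For example, the
                      * (hypothetical) address 00:10:18:ab:cd:ef is
                      * written as 0x00000010 then 0x18abcdef.
                      */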
6807                 val = (mac_addr[0] << 8) | mac_addr[1];
6808                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6809
6810                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6811                       (mac_addr[4] << 8) | mac_addr[5];
6812                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6813
6814                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6815
6816         } else
6817                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6818
6819         /* Close multi and leading connections;
6820            completions for the ramrods are collected synchronously */
6821         for_each_nondefault_queue(bp, i)
6822                 if (bnx2x_stop_multi(bp, i))
6823                         goto unload_error;
6824
6825         rc = bnx2x_stop_leading(bp);
6826         if (rc) {
6827                 BNX2X_ERR("Stop leading failed!\n");
6828 #ifdef BNX2X_STOP_ON_ERROR
6829                 return -EBUSY;
6830 #else
6831                 goto unload_error;
6832 #endif
6833         }
6834
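             /* Reached on the success path as well: the reset code must
              * be reported to the MCP (or derived locally without one)
              * in either case.
              */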
6835 unload_error:
6836         if (!BP_NOMCP(bp))
6837                 reset_code = bnx2x_fw_command(bp, reset_code);
6838         else {
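                     /* Without an MCP the driver mimics the firmware's
                      * reference counting: load_count[0] tracks all loaded
                      * functions, load_count[1 + port] those on this port;
                      * whichever count reaches zero decides how much of
                      * the chip may be reset.
                      */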
6839                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6840                    load_count[0], load_count[1], load_count[2]);
6841                 load_count[0]--;
6842                 load_count[1 + port]--;
6843                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6844                    load_count[0], load_count[1], load_count[2]);
6845                 if (load_count[0] == 0)
6846                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6847                 else if (load_count[1 + port] == 0)
6848                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6849                 else
6850                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6851         }
6852
6853         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6854             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6855                 bnx2x__link_reset(bp);
6856
6857         /* Reset the chip */
6858         bnx2x_reset_chip(bp, reset_code);
6859
6860         /* Report UNLOAD_DONE to MCP */
6861         if (!BP_NOMCP(bp))
6862                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6863         bp->port.pmf = 0;
6864
6865         /* Free SKBs, SGEs, TPA pool and driver internals */
6866         bnx2x_free_skbs(bp);
6867         for_each_queue(bp, i)
6868                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6869         for_each_queue(bp, i)
6870                 netif_napi_del(&bnx2x_fp(bp, i, napi));
6871         bnx2x_free_mem(bp);
6872
6873         bp->state = BNX2X_STATE_CLOSED;
6874
6875         netif_carrier_off(bp->dev);
6876
6877         return 0;
6878 }
6879
6880 static void bnx2x_reset_task(struct work_struct *work)
6881 {
6882         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6883
6884 #ifdef BNX2X_STOP_ON_ERROR
6885         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6886                   " so reset not done to allow debug dump,\n"
6887          KERN_ERR " you will need to reboot when done\n");
6888         return;
6889 #endif
6890
6891         rtnl_lock();
6892
6893         if (!netif_running(bp->dev))
6894                 goto reset_task_exit;
6895
6896         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6897         bnx2x_nic_load(bp, LOAD_NORMAL);
6898
6899 reset_task_exit:
6900         rtnl_unlock();
6901 }
6902
6903 /* end of nic load/unload */
6904
6905 /* ethtool_ops */
6906
6907 /*
6908  * Init service functions
6909  */
6910
6911 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6912 {
6913         u32 val;
6914
6915         /* Check if there is any driver already loaded */
6916         val = REG_RD(bp, MISC_REG_UNPREPARED);
6917         if (val == 0x1) {
6918                 /* Check if it is the UNDI driver:
6919                  * UNDI initializes the CID offset for the normal doorbell to 0x7
6920                  */
6921                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6922                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6923                 if (val == 0x7) {
6924                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6925                         /* save our func */
6926                         int func = BP_FUNC(bp);
6927                         u32 swap_en;
6928                         u32 swap_val;
6929
6930                         /* clear the UNDI indication */
6931                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6932
6933                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6934
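                             /* UNDI may own either port, so request the
                              * unload as port 0 first; if the MCP does not
                              * answer UNLOAD_COMMON, UNDI is loaded on the
                              * other port too and port 1 must be unloaded
                              * before the common blocks can be reset.
                              */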
6935                         /* try unload UNDI on port 0 */
6936                         bp->func = 0;
6937                         bp->fw_seq =
6938                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6939                                 DRV_MSG_SEQ_NUMBER_MASK);
6940                         reset_code = bnx2x_fw_command(bp, reset_code);
6941
6942                         /* if UNDI is loaded on the other port */
6943                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6944
6945                                 /* send "DONE" for previous unload */
6946                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6947
6948                                 /* unload UNDI on port 1 */
6949                                 bp->func = 1;
6950                                 bp->fw_seq =
6951                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6952                                         DRV_MSG_SEQ_NUMBER_MASK);
6953                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6954
6955                                 bnx2x_fw_command(bp, reset_code);
6956                         }
6957
6958                         /* now it's safe to release the lock */
6959                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6960
6961                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6962                                     HC_REG_CONFIG_0), 0x1000);
6963
6964                         /* close input traffic and wait for it to stop */
6965                         /* Do not rcv packets to BRB */
6966                         REG_WR(bp,
6967                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6968                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6969                         /* Do not direct rcv packets that are not for MCP to
6970                          * the BRB */
6971                         REG_WR(bp,
6972                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6973                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6974                         /* clear AEU */
6975                         REG_WR(bp,
6976                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6977                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6978                         msleep(10);
6979
6980                         /* save NIG port swap info */
6981                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6982                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6983                         /* reset device */
6984                         REG_WR(bp,
6985                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6986                                0xd3ffffff);
6987                         REG_WR(bp,
6988                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6989                                0x1403);
6990                         /* take the NIG out of reset and restore swap values */
6991                         REG_WR(bp,
6992                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6993                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6994                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6995                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6996
6997                         /* send unload done to the MCP */
6998                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6999
7000                         /* restore our func and fw_seq */
7001                         bp->func = func;
7002                         bp->fw_seq =
7003                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7004                                 DRV_MSG_SEQ_NUMBER_MASK);
7005
7006                 } else
7007                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7008         }
7009 }
7010
7011 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7012 {
7013         u32 val, val2, val3, val4, id;
7014         u16 pmc;
7015
7016         /* Get the chip revision id and number. */
7017         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7018         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7019         id = ((val & 0xffff) << 16);
7020         val = REG_RD(bp, MISC_REG_CHIP_REV);
7021         id |= ((val & 0xf) << 12);
7022         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7023         id |= ((val & 0xff) << 4);
7024         val = REG_RD(bp, MISC_REG_BOND_ID);
7025         id |= (val & 0xf);
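             /* e.g. (hypothetical values) a chip number of 0x164e with
              * rev, metal and bond_id all zero assembles to 0x164e0000 */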
7026         bp->common.chip_id = id;
7027         bp->link_params.chip_id = bp->common.chip_id;
7028         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7029
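             /* NVRAM_1MB_SIZE is presumably one megabit expressed in
              * bytes; the CFG4 field scales it by a power of two to
              * give the flash size */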
7030         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7031         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7032                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7033         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7034                        bp->common.flash_size, bp->common.flash_size);
7035
7036         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7037         bp->link_params.shmem_base = bp->common.shmem_base;
7038         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7039
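             /* the shared memory block must lie inside the MCP scratchpad
              * window (0xA0000-0xC0000); anything else means the MCP never
              * initialized it, so run without firmware assistance */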
7040         if (!bp->common.shmem_base ||
7041             (bp->common.shmem_base < 0xA0000) ||
7042             (bp->common.shmem_base >= 0xC0000)) {
7043                 BNX2X_DEV_INFO("MCP not active\n");
7044                 bp->flags |= NO_MCP_FLAG;
7045                 return;
7046         }
7047
7048         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7049         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7050                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7051                 BNX2X_ERR("BAD MCP validity signature\n");
7052
7053         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7054         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7055
7056         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
7057                        bp->common.hw_config, bp->common.board);
7058
7059         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7060                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7061                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7062
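             /* bc_rev packs the bootcode version one byte per component;
              * dropping the lowest byte leaves major.minor.sub, compared
              * numerically against BNX2X_BC_VER below */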
7063         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7064         bp->common.bc_ver = val;
7065         BNX2X_DEV_INFO("bc_ver %X\n", val);
7066         if (val < BNX2X_BC_VER) {
7067                 /* for now only warn;
7068                  * later we might need to enforce this */
7069                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7070                           " please upgrade BC\n", BNX2X_BC_VER, val);
7071         }
7072
7073         if (BP_E1HVN(bp) == 0) {
7074                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7075                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7076         } else {
7077                 /* no WOL capability for E1HVN != 0 */
7078                 bp->flags |= NO_WOL_FLAG;
7079         }
7080         BNX2X_DEV_INFO("%sWoL capable\n",
7081                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7082
7083         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7084         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7085         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7086         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7087
7088         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7089                val, val2, val3, val4);
7090 }
7091
7092 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7093                                                     u32 switch_cfg)
7094 {
7095         int port = BP_PORT(bp);
7096         u32 ext_phy_type;
7097
7098         switch (switch_cfg) {
7099         case SWITCH_CFG_1G:
7100                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7101
7102                 ext_phy_type =
7103                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7104                 switch (ext_phy_type) {
7105                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7106                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7107                                        ext_phy_type);
7108
7109                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7110                                                SUPPORTED_10baseT_Full |
7111                                                SUPPORTED_100baseT_Half |
7112                                                SUPPORTED_100baseT_Full |
7113                                                SUPPORTED_1000baseT_Full |
7114                                                SUPPORTED_2500baseX_Full |
7115                                                SUPPORTED_TP |
7116                                                SUPPORTED_FIBRE |
7117                                                SUPPORTED_Autoneg |
7118                                                SUPPORTED_Pause |
7119                                                SUPPORTED_Asym_Pause);
7120                         break;
7121
7122                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7123                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7124                                        ext_phy_type);
7125
7126                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7127                                                SUPPORTED_10baseT_Full |
7128                                                SUPPORTED_100baseT_Half |
7129                                                SUPPORTED_100baseT_Full |
7130                                                SUPPORTED_1000baseT_Full |
7131                                                SUPPORTED_TP |
7132                                                SUPPORTED_FIBRE |
7133                                                SUPPORTED_Autoneg |
7134                                                SUPPORTED_Pause |
7135                                                SUPPORTED_Asym_Pause);
7136                         break;
7137
7138                 default:
7139                         BNX2X_ERR("NVRAM config error. "
7140                                   "BAD SerDes ext_phy_config 0x%x\n",
7141                                   bp->link_params.ext_phy_config);
7142                         return;
7143                 }
7144
7145                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7146                                            port*0x10);
7147                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7148                 break;
7149
7150         case SWITCH_CFG_10G:
7151                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7152
7153                 ext_phy_type =
7154                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7155                 switch (ext_phy_type) {
7156                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7157                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7158                                        ext_phy_type);
7159
7160                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7161                                                SUPPORTED_10baseT_Full |
7162                                                SUPPORTED_100baseT_Half |
7163                                                SUPPORTED_100baseT_Full |
7164                                                SUPPORTED_1000baseT_Full |
7165                                                SUPPORTED_2500baseX_Full |
7166                                                SUPPORTED_10000baseT_Full |
7167                                                SUPPORTED_TP |
7168                                                SUPPORTED_FIBRE |
7169                                                SUPPORTED_Autoneg |
7170                                                SUPPORTED_Pause |
7171                                                SUPPORTED_Asym_Pause);
7172                         break;
7173
7174                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7175                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7176                                        ext_phy_type);
7177
7178                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7179                                                SUPPORTED_FIBRE |
7180                                                SUPPORTED_Pause |
7181                                                SUPPORTED_Asym_Pause);
7182                         break;
7183
7184                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7185                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7186                                        ext_phy_type);
7187
7188                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7189                                                SUPPORTED_1000baseT_Full |
7190                                                SUPPORTED_FIBRE |
7191                                                SUPPORTED_Pause |
7192                                                SUPPORTED_Asym_Pause);
7193                         break;
7194
7195                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7196                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7197                                        ext_phy_type);
7198
7199                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7200                                                SUPPORTED_1000baseT_Full |
7201                                                SUPPORTED_FIBRE |
7202                                                SUPPORTED_Autoneg |
7203                                                SUPPORTED_Pause |
7204                                                SUPPORTED_Asym_Pause);
7205                         break;
7206
7207                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7208                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7209                                        ext_phy_type);
7210
7211                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7212                                                SUPPORTED_2500baseX_Full |
7213                                                SUPPORTED_1000baseT_Full |
7214                                                SUPPORTED_FIBRE |
7215                                                SUPPORTED_Autoneg |
7216                                                SUPPORTED_Pause |
7217                                                SUPPORTED_Asym_Pause);
7218                         break;
7219
7220                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7221                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7222                                        ext_phy_type);
7223
7224                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7225                                                SUPPORTED_TP |
7226                                                SUPPORTED_Autoneg |
7227                                                SUPPORTED_Pause |
7228                                                SUPPORTED_Asym_Pause);
7229                         break;
7230
7231                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7232                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7233                                   bp->link_params.ext_phy_config);
7234                         break;
7235
7236                 default:
7237                         BNX2X_ERR("NVRAM config error. "
7238                                   "BAD XGXS ext_phy_config 0x%x\n",
7239                                   bp->link_params.ext_phy_config);
7240                         return;
7241                 }
7242
7243                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7244                                            port*0x18);
7245                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7246
7247                 break;
7248
7249         default:
7250                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7251                           bp->port.link_config);
7252                 return;
7253         }
7254         bp->link_params.phy_addr = bp->port.phy_addr;
7255
7256         /* mask what we support according to speed_cap_mask */
7257         if (!(bp->link_params.speed_cap_mask &
7258                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7259                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7260
7261         if (!(bp->link_params.speed_cap_mask &
7262                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7263                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7264
7265         if (!(bp->link_params.speed_cap_mask &
7266                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7267                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7268
7269         if (!(bp->link_params.speed_cap_mask &
7270                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7271                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7272
7273         if (!(bp->link_params.speed_cap_mask &
7274                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7275                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7276                                         SUPPORTED_1000baseT_Full);
7277
7278         if (!(bp->link_params.speed_cap_mask &
7279                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7280                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7281
7282         if (!(bp->link_params.speed_cap_mask &
7283                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7284                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7285
7286         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7287 }
7288
7289 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7290 {
7291         bp->link_params.req_duplex = DUPLEX_FULL;
7292
7293         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7294         case PORT_FEATURE_LINK_SPEED_AUTO:
7295                 if (bp->port.supported & SUPPORTED_Autoneg) {
7296                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7297                         bp->port.advertising = bp->port.supported;
7298                 } else {
7299                         u32 ext_phy_type =
7300                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7301
7302                         if ((ext_phy_type ==
7303                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7304                             (ext_phy_type ==
7305                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7306                                 /* force 10G, no AN */
7307                                 bp->link_params.req_line_speed = SPEED_10000;
7308                                 bp->port.advertising =
7309                                                 (ADVERTISED_10000baseT_Full |
7310                                                  ADVERTISED_FIBRE);
7311                                 break;
7312                         }
7313                         BNX2X_ERR("NVRAM config error. "
7314                                   "Invalid link_config 0x%x"
7315                                   "  Autoneg not supported\n",
7316                                   bp->port.link_config);
7317                         return;
7318                 }
7319                 break;
7320
7321         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7322                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7323                         bp->link_params.req_line_speed = SPEED_10;
7324                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7325                                                 ADVERTISED_TP);
7326                 } else {
7327                         BNX2X_ERR("NVRAM config error. "
7328                                   "Invalid link_config 0x%x"
7329                                   "  speed_cap_mask 0x%x\n",
7330                                   bp->port.link_config,
7331                                   bp->link_params.speed_cap_mask);
7332                         return;
7333                 }
7334                 break;
7335
7336         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7337                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7338                         bp->link_params.req_line_speed = SPEED_10;
7339                         bp->link_params.req_duplex = DUPLEX_HALF;
7340                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7341                                                 ADVERTISED_TP);
7342                 } else {
7343                         BNX2X_ERR("NVRAM config error. "
7344                                   "Invalid link_config 0x%x"
7345                                   "  speed_cap_mask 0x%x\n",
7346                                   bp->port.link_config,
7347                                   bp->link_params.speed_cap_mask);
7348                         return;
7349                 }
7350                 break;
7351
7352         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7353                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7354                         bp->link_params.req_line_speed = SPEED_100;
7355                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7356                                                 ADVERTISED_TP);
7357                 } else {
7358                         BNX2X_ERR("NVRAM config error. "
7359                                   "Invalid link_config 0x%x"
7360                                   "  speed_cap_mask 0x%x\n",
7361                                   bp->port.link_config,
7362                                   bp->link_params.speed_cap_mask);
7363                         return;
7364                 }
7365                 break;
7366
7367         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7368                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7369                         bp->link_params.req_line_speed = SPEED_100;
7370                         bp->link_params.req_duplex = DUPLEX_HALF;
7371                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7372                                                 ADVERTISED_TP);
7373                 } else {
7374                         BNX2X_ERR("NVRAM config error. "
7375                                   "Invalid link_config 0x%x"
7376                                   "  speed_cap_mask 0x%x\n",
7377                                   bp->port.link_config,
7378                                   bp->link_params.speed_cap_mask);
7379                         return;
7380                 }
7381                 break;
7382
7383         case PORT_FEATURE_LINK_SPEED_1G:
7384                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7385                         bp->link_params.req_line_speed = SPEED_1000;
7386                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7387                                                 ADVERTISED_TP);
7388                 } else {
7389                         BNX2X_ERR("NVRAM config error. "
7390                                   "Invalid link_config 0x%x"
7391                                   "  speed_cap_mask 0x%x\n",
7392                                   bp->port.link_config,
7393                                   bp->link_params.speed_cap_mask);
7394                         return;
7395                 }
7396                 break;
7397
7398         case PORT_FEATURE_LINK_SPEED_2_5G:
7399                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7400                         bp->link_params.req_line_speed = SPEED_2500;
7401                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7402                                                 ADVERTISED_TP);
7403                 } else {
7404                         BNX2X_ERR("NVRAM config error. "
7405                                   "Invalid link_config 0x%x"
7406                                   "  speed_cap_mask 0x%x\n",
7407                                   bp->port.link_config,
7408                                   bp->link_params.speed_cap_mask);
7409                         return;
7410                 }
7411                 break;
7412
7413         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7414         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7415         case PORT_FEATURE_LINK_SPEED_10G_KR:
7416                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7417                         bp->link_params.req_line_speed = SPEED_10000;
7418                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7419                                                 ADVERTISED_FIBRE);
7420                 } else {
7421                         BNX2X_ERR("NVRAM config error. "
7422                                   "Invalid link_config 0x%x"
7423                                   "  speed_cap_mask 0x%x\n",
7424                                   bp->port.link_config,
7425                                   bp->link_params.speed_cap_mask);
7426                         return;
7427                 }
7428                 break;
7429
7430         default:
7431                 BNX2X_ERR("NVRAM config error. "
7432                           "BAD link speed link_config 0x%x\n",
7433                           bp->port.link_config);
7434                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7435                 bp->port.advertising = bp->port.supported;
7436                 break;
7437         }
7438
7439         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7440                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7441         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7442             !(bp->port.supported & SUPPORTED_Autoneg))
7443                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7444
7445         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7446                        "  advertising 0x%x\n",
7447                        bp->link_params.req_line_speed,
7448                        bp->link_params.req_duplex,
7449                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7450 }
7451
7452 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7453 {
7454         int port = BP_PORT(bp);
7455         u32 val, val2;
7456
7457         bp->link_params.bp = bp;
7458         bp->link_params.port = port;
7459
7460         bp->link_params.serdes_config =
7461                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7462         bp->link_params.lane_config =
7463                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7464         bp->link_params.ext_phy_config =
7465                 SHMEM_RD(bp,
7466                          dev_info.port_hw_config[port].external_phy_config);
7467         bp->link_params.speed_cap_mask =
7468                 SHMEM_RD(bp,
7469                          dev_info.port_hw_config[port].speed_capability_mask);
7470
7471         bp->port.link_config =
7472                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7473
7474         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7475              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7476                        "  link_config 0x%08x\n",
7477                        bp->link_params.serdes_config,
7478                        bp->link_params.lane_config,
7479                        bp->link_params.ext_phy_config,
7480                        bp->link_params.speed_cap_mask, bp->port.link_config);
7481
7482         bp->link_params.switch_cfg = (bp->port.link_config &
7483                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7484         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7485
7486         bnx2x_link_settings_requested(bp);
7487
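             /* the port MAC is stored as two shmem words: mac_upper holds
              * bytes 0-1 in its low 16 bits, mac_lower bytes 2-5 */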
7488         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7489         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7490         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7491         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7492         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7493         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7494         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7495         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7496         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7497         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7498 }
7499
7500 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7501 {
7502         int func = BP_FUNC(bp);
7503         u32 val, val2;
7504         int rc = 0;
7505
7506         bnx2x_get_common_hwinfo(bp);
7507
7508         bp->e1hov = 0;
7509         bp->e1hmf = 0;
7510         if (CHIP_IS_E1H(bp)) {
7511                 bp->mf_config =
7512                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7513
7514                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7515                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7516                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7517
7518                         bp->e1hov = val;
7519                         bp->e1hmf = 1;
7520                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7521                                        "(0x%04x)\n",
7522                                        func, bp->e1hov, bp->e1hov);
7523                 } else {
7524                         BNX2X_DEV_INFO("Single function mode\n");
7525                         if (BP_E1HVN(bp)) {
7526                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7527                                           "  aborting\n", func);
7528                                 rc = -EPERM;
7529                         }
7530                 }
7531         }
7532
7533         if (!BP_NOMCP(bp)) {
7534                 bnx2x_get_port_hwinfo(bp);
7535
7536                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7537                               DRV_MSG_SEQ_NUMBER_MASK);
7538                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7539         }
7540
7541         if (IS_E1HMF(bp)) {
7542                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7543                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7544                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7545                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7546                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7547                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7548                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7549                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7550                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7551                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7552                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7553                                ETH_ALEN);
7554                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7555                                ETH_ALEN);
7556                 }
7557
7558                 return rc;
7559         }
7560
7561         if (BP_NOMCP(bp)) {
7562                 /* only supposed to happen on emulation/FPGA */
7563                 BNX2X_ERR("warning random MAC workaround active\n");
7564                 random_ether_addr(bp->dev->dev_addr);
7565                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7566         }
7567
7568         return rc;
7569 }
7570
7571 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7572 {
7573         int func = BP_FUNC(bp);
7574         int rc;
7575
7576         /* Disable interrupt handling until HW is initialized */
7577         atomic_set(&bp->intr_sem, 1);
7578
7579         mutex_init(&bp->port.phy_mutex);
7580
7581         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7582         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7583
7584         rc = bnx2x_get_hwinfo(bp);
7585
7586         /* need to reset chip if undi was active */
7587         if (!BP_NOMCP(bp))
7588                 bnx2x_undi_unload(bp);
7589
7590         if (CHIP_REV_IS_FPGA(bp))
7591                 printk(KERN_ERR PFX "FPGA detected\n");
7592
7593         if (BP_NOMCP(bp) && (func == 0))
7594                 printk(KERN_ERR PFX
7595                        "MCP disabled, must load devices in order!\n");
7596
7597         /* Set TPA flags */
7598         if (disable_tpa) {
7599                 bp->flags &= ~TPA_ENABLE_FLAG;
7600                 bp->dev->features &= ~NETIF_F_LRO;
7601         } else {
7602                 bp->flags |= TPA_ENABLE_FLAG;
7603                 bp->dev->features |= NETIF_F_LRO;
7604         }
7605
7606
7607         bp->tx_ring_size = MAX_TX_AVAIL;
7608         bp->rx_ring_size = MAX_RX_AVAIL;
7609
7610         bp->rx_csum = 1;
7611         bp->rx_offset = 0;
7612
7613         bp->tx_ticks = 50;
7614         bp->rx_ticks = 25;
7615
7616         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7617         bp->current_interval = (poll ? poll : bp->timer_interval);
7618
7619         init_timer(&bp->timer);
7620         bp->timer.expires = jiffies + bp->current_interval;
7621         bp->timer.data = (unsigned long) bp;
7622         bp->timer.function = bnx2x_timer;
7623
7624         return rc;
7625 }
7626
7627 /*
7628  * ethtool service functions
7629  */
7630
7631 /* All ethtool functions called with rtnl_lock */
7632
7633 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7634 {
7635         struct bnx2x *bp = netdev_priv(dev);
7636
7637         cmd->supported = bp->port.supported;
7638         cmd->advertising = bp->port.advertising;
7639
7640         if (netif_carrier_ok(dev)) {
7641                 cmd->speed = bp->link_vars.line_speed;
7642                 cmd->duplex = bp->link_vars.duplex;
7643         } else {
7644                 cmd->speed = bp->link_params.req_line_speed;
7645                 cmd->duplex = bp->link_params.req_duplex;
7646         }
7647         if (IS_E1HMF(bp)) {
7648                 u16 vn_max_rate;
7649
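                     /* the per-VN max bandwidth is kept in mf_config in
                      * units of 100 Mbps; never report a speed above it */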
7650                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7651                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7652                 if (vn_max_rate < cmd->speed)
7653                         cmd->speed = vn_max_rate;
7654         }
7655
7656         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7657                 u32 ext_phy_type =
7658                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7659
7660                 switch (ext_phy_type) {
7661                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7662                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7663                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7664                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7665                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7666                         cmd->port = PORT_FIBRE;
7667                         break;
7668
7669                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7670                         cmd->port = PORT_TP;
7671                         break;
7672
7673                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7674                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7675                                   bp->link_params.ext_phy_config);
7676                         break;
7677
7678                 default:
7679                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7680                            bp->link_params.ext_phy_config);
7681                         break;
7682                 }
7683         } else
7684                 cmd->port = PORT_TP;
7685
7686         cmd->phy_address = bp->port.phy_addr;
7687         cmd->transceiver = XCVR_INTERNAL;
7688
7689         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7690                 cmd->autoneg = AUTONEG_ENABLE;
7691         else
7692                 cmd->autoneg = AUTONEG_DISABLE;
7693
7694         cmd->maxtxpkt = 0;
7695         cmd->maxrxpkt = 0;
7696
7697         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7698            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7699            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7700            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7701            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7702            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7703            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7704
7705         return 0;
7706 }
7707
7708 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7709 {
7710         struct bnx2x *bp = netdev_priv(dev);
7711         u32 advertising;
7712
7713         if (IS_E1HMF(bp))
7714                 return 0;
7715
7716         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7717            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7718            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7719            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7720            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7721            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7722            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7723
7724         if (cmd->autoneg == AUTONEG_ENABLE) {
7725                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7726                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7727                         return -EINVAL;
7728                 }
7729
7730                 /* advertise the requested speed and duplex if supported */
7731                 cmd->advertising &= bp->port.supported;
7732
7733                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7734                 bp->link_params.req_duplex = DUPLEX_FULL;
7735                 bp->port.advertising |= (ADVERTISED_Autoneg |
7736                                          cmd->advertising);
7737
7738         } else { /* forced speed */
7739                 /* advertise the requested speed and duplex if supported */
7740                 switch (cmd->speed) {
7741                 case SPEED_10:
7742                         if (cmd->duplex == DUPLEX_FULL) {
7743                                 if (!(bp->port.supported &
7744                                       SUPPORTED_10baseT_Full)) {
7745                                         DP(NETIF_MSG_LINK,
7746                                            "10M full not supported\n");
7747                                         return -EINVAL;
7748                                 }
7749
7750                                 advertising = (ADVERTISED_10baseT_Full |
7751                                                ADVERTISED_TP);
7752                         } else {
7753                                 if (!(bp->port.supported &
7754                                       SUPPORTED_10baseT_Half)) {
7755                                         DP(NETIF_MSG_LINK,
7756                                            "10M half not supported\n");
7757                                         return -EINVAL;
7758                                 }
7759
7760                                 advertising = (ADVERTISED_10baseT_Half |
7761                                                ADVERTISED_TP);
7762                         }
7763                         break;
7764
7765                 case SPEED_100:
7766                         if (cmd->duplex == DUPLEX_FULL) {
7767                                 if (!(bp->port.supported &
7768                                                 SUPPORTED_100baseT_Full)) {
7769                                         DP(NETIF_MSG_LINK,
7770                                            "100M full not supported\n");
7771                                         return -EINVAL;
7772                                 }
7773
7774                                 advertising = (ADVERTISED_100baseT_Full |
7775                                                ADVERTISED_TP);
7776                         } else {
7777                                 if (!(bp->port.supported &
7778                                                 SUPPORTED_100baseT_Half)) {
7779                                         DP(NETIF_MSG_LINK,
7780                                            "100M half not supported\n");
7781                                         return -EINVAL;
7782                                 }
7783
7784                                 advertising = (ADVERTISED_100baseT_Half |
7785                                                ADVERTISED_TP);
7786                         }
7787                         break;
7788
7789                 case SPEED_1000:
7790                         if (cmd->duplex != DUPLEX_FULL) {
7791                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7792                                 return -EINVAL;
7793                         }
7794
7795                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7796                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7797                                 return -EINVAL;
7798                         }
7799
7800                         advertising = (ADVERTISED_1000baseT_Full |
7801                                        ADVERTISED_TP);
7802                         break;
7803
7804                 case SPEED_2500:
7805                         if (cmd->duplex != DUPLEX_FULL) {
7806                                 DP(NETIF_MSG_LINK,
7807                                    "2.5G half not supported\n");
7808                                 return -EINVAL;
7809                         }
7810
7811                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7812                                 DP(NETIF_MSG_LINK,
7813                                    "2.5G full not supported\n");
7814                                 return -EINVAL;
7815                         }
7816
7817                         advertising = (ADVERTISED_2500baseX_Full |
7818                                        ADVERTISED_TP);
7819                         break;
7820
7821                 case SPEED_10000:
7822                         if (cmd->duplex != DUPLEX_FULL) {
7823                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7824                                 return -EINVAL;
7825                         }
7826
7827                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7828                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7829                                 return -EINVAL;
7830                         }
7831
7832                         advertising = (ADVERTISED_10000baseT_Full |
7833                                        ADVERTISED_FIBRE);
7834                         break;
7835
7836                 default:
7837                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7838                         return -EINVAL;
7839                 }
7840
7841                 bp->link_params.req_line_speed = cmd->speed;
7842                 bp->link_params.req_duplex = cmd->duplex;
7843                 bp->port.advertising = advertising;
7844         }
7845
7846         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7847            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7848            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7849            bp->port.advertising);
7850
7851         if (netif_running(dev)) {
7852                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7853                 bnx2x_link_set(bp);
7854         }
7855
7856         return 0;
7857 }
7858
7859 #define PHY_FW_VER_LEN                  10
7860
7861 static void bnx2x_get_drvinfo(struct net_device *dev,
7862                               struct ethtool_drvinfo *info)
7863 {
7864         struct bnx2x *bp = netdev_priv(dev);
7865         u8 phy_fw_ver[PHY_FW_VER_LEN];
7866
7867         strcpy(info->driver, DRV_MODULE_NAME);
7868         strcpy(info->version, DRV_MODULE_VERSION);
7869
7870         phy_fw_ver[0] = '\0';
7871         if (bp->port.pmf) {
7872                 bnx2x_acquire_phy_lock(bp);
7873                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7874                                              (bp->state != BNX2X_STATE_CLOSED),
7875                                              phy_fw_ver, PHY_FW_VER_LEN);
7876                 bnx2x_release_phy_lock(bp);
7877         }
7878
7879         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7880                  (bp->common.bc_ver & 0xff0000) >> 16,
7881                  (bp->common.bc_ver & 0xff00) >> 8,
7882                  (bp->common.bc_ver & 0xff),
7883                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7884         strcpy(info->bus_info, pci_name(bp->pdev));
7885         info->n_stats = BNX2X_NUM_STATS;
7886         info->testinfo_len = BNX2X_NUM_TESTS;
7887         info->eedump_len = bp->common.flash_size;
7888         info->regdump_len = 0;
7889 }
7890
7891 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7892 {
7893         struct bnx2x *bp = netdev_priv(dev);
7894
7895         if (bp->flags & NO_WOL_FLAG) {
7896                 wol->supported = 0;
7897                 wol->wolopts = 0;
7898         } else {
7899                 wol->supported = WAKE_MAGIC;
7900                 if (bp->wol)
7901                         wol->wolopts = WAKE_MAGIC;
7902                 else
7903                         wol->wolopts = 0;
7904         }
7905         memset(&wol->sopass, 0, sizeof(wol->sopass));
7906 }
7907
7908 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7909 {
7910         struct bnx2x *bp = netdev_priv(dev);
7911
7912         if (wol->wolopts & ~WAKE_MAGIC)
7913                 return -EINVAL;
7914
7915         if (wol->wolopts & WAKE_MAGIC) {
7916                 if (bp->flags & NO_WOL_FLAG)
7917                         return -EINVAL;
7918
7919                 bp->wol = 1;
7920         } else
7921                 bp->wol = 0;
7922
7923         return 0;
7924 }
7925
7926 static u32 bnx2x_get_msglevel(struct net_device *dev)
7927 {
7928         struct bnx2x *bp = netdev_priv(dev);
7929
7930         return bp->msglevel;
7931 }
7932
7933 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7934 {
7935         struct bnx2x *bp = netdev_priv(dev);
7936
7937         if (capable(CAP_NET_ADMIN))
7938                 bp->msglevel = level;
7939 }
7940
7941 static int bnx2x_nway_reset(struct net_device *dev)
7942 {
7943         struct bnx2x *bp = netdev_priv(dev);
7944
7945         if (!bp->port.pmf)
7946                 return 0;
7947
7948         if (netif_running(dev)) {
7949                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7950                 bnx2x_link_set(bp);
7951         }
7952
7953         return 0;
7954 }
7955
7956 static int bnx2x_get_eeprom_len(struct net_device *dev)
7957 {
7958         struct bnx2x *bp = netdev_priv(dev);
7959
7960         return bp->common.flash_size;
7961 }
7962
7963 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7964 {
7965         int port = BP_PORT(bp);
7966         int count, i;
7967         u32 val = 0;
7968
7969         /* adjust timeout for emulation/FPGA */
7970         count = NVRAM_TIMEOUT_COUNT;
7971         if (CHIP_REV_IS_SLOW(bp))
7972                 count *= 100;
7973
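             /* the grant is polled below count*10 times with a 5 us
              * delay between reads before giving up */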
7974         /* request access to nvram interface */
7975         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7976                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7977
7978         for (i = 0; i < count*10; i++) {
7979                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7980                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7981                         break;
7982
7983                 udelay(5);
7984         }
7985
7986         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7987                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7988                 return -EBUSY;
7989         }
7990
7991         return 0;
7992 }
7993
7994 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7995 {
7996         int port = BP_PORT(bp);
7997         int count, i;
7998         u32 val = 0;
7999
8000         /* adjust timeout for emulation/FPGA */
8001         count = NVRAM_TIMEOUT_COUNT;
8002         if (CHIP_REV_IS_SLOW(bp))
8003                 count *= 100;
8004
8005         /* relinquish nvram interface */
8006         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8007                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8008
8009         for (i = 0; i < count*10; i++) {
8010                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8011                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8012                         break;
8013
8014                 udelay(5);
8015         }
8016
8017         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8018                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8019                 return -EBUSY;
8020         }
8021
8022         return 0;
8023 }
8024
8025 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8026 {
8027         u32 val;
8028
8029         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8030
8031         /* enable both bits, even on read */
8032         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8033                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8034                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8035 }
8036
8037 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8038 {
8039         u32 val;
8040
8041         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8042
8043         /* disable both bits, even after read */
8044         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8045                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8046                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8047 }
8048
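/* Read a single dword from NVRAM: clear the DONE bit, program the byte
 * address, issue the DOIT command and poll for completion.  The result
 * is converted to big-endian so ethtool sees a plain byte stream.
 */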
8049 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8050                                   u32 cmd_flags)
8051 {
8052         int count, i, rc;
8053         u32 val;
8054
8055         /* build the command word */
8056         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8057
8058         /* need to clear DONE bit separately */
8059         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8060
8061         /* address of the NVRAM to read from */
8062         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8063                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8064
8065         /* issue a read command */
8066         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8067
8068         /* adjust timeout for emulation/FPGA */
8069         count = NVRAM_TIMEOUT_COUNT;
8070         if (CHIP_REV_IS_SLOW(bp))
8071                 count *= 100;
8072
8073         /* wait for completion */
8074         *ret_val = 0;
8075         rc = -EBUSY;
8076         for (i = 0; i < count; i++) {
8077                 udelay(5);
8078                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8079
8080                 if (val & MCPR_NVM_COMMAND_DONE) {
8081                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8082                         /* we read NVRAM data in CPU order,
8083                          * but ethtool sees it as an array of bytes;
8084                          * converting to big-endian does the work */
8085                         val = cpu_to_be32(val);
8086                         *ret_val = val;
8087                         rc = 0;
8088                         break;
8089                 }
8090         }
8091
8092         return rc;
8093 }
8094
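/* Read buf_size bytes starting at offset; both must be dword aligned
 * and within the flash.  Takes the NVRAM lock and brackets the burst
 * with the FIRST/LAST command flags.
 */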
8095 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8096                             int buf_size)
8097 {
8098         int rc;
8099         u32 cmd_flags;
8100         u32 val;
8101
8102         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8103                 DP(BNX2X_MSG_NVM,
8104                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8105                    offset, buf_size);
8106                 return -EINVAL;
8107         }
8108
8109         if (offset + buf_size > bp->common.flash_size) {
8110                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8111                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8112                    offset, buf_size, bp->common.flash_size);
8113                 return -EINVAL;
8114         }
8115
8116         /* request access to nvram interface */
8117         rc = bnx2x_acquire_nvram_lock(bp);
8118         if (rc)
8119                 return rc;
8120
8121         /* enable access to nvram interface */
8122         bnx2x_enable_nvram_access(bp);
8123
8124         /* read the first word(s) */
8125         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8126         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8127                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8128                 memcpy(ret_buf, &val, 4);
8129
8130                 /* advance to the next dword */
8131                 offset += sizeof(u32);
8132                 ret_buf += sizeof(u32);
8133                 buf_size -= sizeof(u32);
8134                 cmd_flags = 0;
8135         }
8136
8137         if (rc == 0) {
8138                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8139                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8140                 memcpy(ret_buf, &val, 4);
8141         }
8142
8143         /* disable access to nvram interface */
8144         bnx2x_disable_nvram_access(bp);
8145         bnx2x_release_nvram_lock(bp);
8146
8147         return rc;
8148 }
8149
8150 static int bnx2x_get_eeprom(struct net_device *dev,
8151                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8152 {
8153         struct bnx2x *bp = netdev_priv(dev);
8154         int rc;
8155
8156         if (!netif_running(dev))
8157                 return -EAGAIN;
8158
8159         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8160            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8161            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8162            eeprom->len, eeprom->len);
8163
8164         /* parameters already validated in ethtool_get_eeprom */
8165
8166         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8167
8168         return rc;
8169 }
8170
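/* Write a single dword to NVRAM: clear the DONE bit, latch the data
 * and the byte address, issue DOIT+WR and poll for completion.
 */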
8171 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8172                                    u32 cmd_flags)
8173 {
8174         int count, i, rc;
8175
8176         /* build the command word */
8177         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8178
8179         /* need to clear DONE bit separately */
8180         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8181
8182         /* write the data */
8183         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8184
8185         /* address of the NVRAM to write to */
8186         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8187                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8188
8189         /* issue the write command */
8190         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8191
8192         /* adjust timeout for emulation/FPGA */
8193         count = NVRAM_TIMEOUT_COUNT;
8194         if (CHIP_REV_IS_SLOW(bp))
8195                 count *= 100;
8196
8197         /* wait for completion */
8198         rc = -EBUSY;
8199         for (i = 0; i < count; i++) {
8200                 udelay(5);
8201                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8202                 if (val & MCPR_NVM_COMMAND_DONE) {
8203                         rc = 0;
8204                         break;
8205                 }
8206         }
8207
8208         return rc;
8209 }
8210
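/* bit shift of a byte lane within its containing dword */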
8211 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8212
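/* Write a single byte by read-modify-write of the dword that contains
 * it; this handles the 1-byte accesses that ethtool generates.
 */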
8213 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8214                               int buf_size)
8215 {
8216         int rc;
8217         u32 cmd_flags;
8218         u32 align_offset;
8219         u32 val;
8220
8221         if (offset + buf_size > bp->common.flash_size) {
8222                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8223                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8224                    offset, buf_size, bp->common.flash_size);
8225                 return -EINVAL;
8226         }
8227
8228         /* request access to nvram interface */
8229         rc = bnx2x_acquire_nvram_lock(bp);
8230         if (rc)
8231                 return rc;
8232
8233         /* enable access to nvram interface */
8234         bnx2x_enable_nvram_access(bp);
8235
8236         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8237         align_offset = (offset & ~0x03);
8238         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8239
8240         if (rc == 0) {
8241                 val &= ~(0xff << BYTE_OFFSET(offset));
8242                 val |= (*data_buf << BYTE_OFFSET(offset));
8243
8244                 /* NVRAM data is returned as an array of bytes;
8245                  * convert it back to CPU order */
8246                 val = be32_to_cpu(val);
8247
8248                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8249                                              cmd_flags);
8250         }
8251
8252         /* disable access to nvram interface */
8253         bnx2x_disable_nvram_access(bp);
8254         bnx2x_release_nvram_lock(bp);
8255
8256         return rc;
8257 }
8258
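/* Dword-granular NVRAM write loop; the FIRST/LAST command flags are
 * re-asserted on NVRAM page boundaries as well as at the ends of the
 * buffer.
 */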
8259 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8260                              int buf_size)
8261 {
8262         int rc;
8263         u32 cmd_flags;
8264         u32 val;
8265         u32 written_so_far;
8266
8267         if (buf_size == 1)      /* ethtool */
8268                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8269
8270         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8271                 DP(BNX2X_MSG_NVM,
8272                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8273                    offset, buf_size);
8274                 return -EINVAL;
8275         }
8276
8277         if (offset + buf_size > bp->common.flash_size) {
8278                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8279                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8280                    offset, buf_size, bp->common.flash_size);
8281                 return -EINVAL;
8282         }
8283
8284         /* request access to nvram interface */
8285         rc = bnx2x_acquire_nvram_lock(bp);
8286         if (rc)
8287                 return rc;
8288
8289         /* enable access to nvram interface */
8290         bnx2x_enable_nvram_access(bp);
8291
8292         written_so_far = 0;
8293         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8294         while ((written_so_far < buf_size) && (rc == 0)) {
8295                 if (written_so_far == (buf_size - sizeof(u32)))
8296                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8297                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8298                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8299                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8300                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8301
8302                 memcpy(&val, data_buf, 4);
8303
8304                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8305
8306                 /* advance to the next dword */
8307                 offset += sizeof(u32);
8308                 data_buf += sizeof(u32);
8309                 written_so_far += sizeof(u32);
8310                 cmd_flags = 0;
8311         }
8312
8313         /* disable access to nvram interface */
8314         bnx2x_disable_nvram_access(bp);
8315         bnx2x_release_nvram_lock(bp);
8316
8317         return rc;
8318 }
8319
8320 static int bnx2x_set_eeprom(struct net_device *dev,
8321                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8322 {
8323         struct bnx2x *bp = netdev_priv(dev);
8324         int rc;
8325
8326         if (!netif_running(dev))
8327                 return -EAGAIN;
8328
8329         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8330            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8331            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8332            eeprom->len, eeprom->len);
8333
8334         /* parameters already validated in ethtool_set_eeprom */
8335
8336         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8337         if (eeprom->magic == 0x00504859)
8338                 if (bp->port.pmf) {
8339
8340                         bnx2x_acquire_phy_lock(bp);
8341                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8342                                              bp->link_params.ext_phy_config,
8343                                              (bp->state != BNX2X_STATE_CLOSED),
8344                                              eebuf, eeprom->len);
8345                         if ((bp->state == BNX2X_STATE_OPEN) ||
8346                             (bp->state == BNX2X_STATE_DISABLED)) {
8347                                 rc |= bnx2x_link_reset(&bp->link_params,
8348                                                        &bp->link_vars);
8349                                 rc |= bnx2x_phy_init(&bp->link_params,
8350                                                      &bp->link_vars);
8351                         }
8352                         bnx2x_release_phy_lock(bp);
8353
8354                 } else /* Only the PMF can access the PHY */
8355                         return -EINVAL;
8356         else
8357                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8358
8359         return rc;
8360 }
8361
8362 static int bnx2x_get_coalesce(struct net_device *dev,
8363                               struct ethtool_coalesce *coal)
8364 {
8365         struct bnx2x *bp = netdev_priv(dev);
8366
8367         memset(coal, 0, sizeof(struct ethtool_coalesce));
8368
8369         coal->rx_coalesce_usecs = bp->rx_ticks;
8370         coal->tx_coalesce_usecs = bp->tx_ticks;
8371
8372         return 0;
8373 }
8374
8375 static int bnx2x_set_coalesce(struct net_device *dev,
8376                               struct ethtool_coalesce *coal)
8377 {
8378         struct bnx2x *bp = netdev_priv(dev);
8379
8380         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8381         if (bp->rx_ticks > 3000)
8382                 bp->rx_ticks = 3000;
8383
8384         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8385         if (bp->tx_ticks > 0x3000)
8386                 bp->tx_ticks = 0x3000;
8387
8388         if (netif_running(dev))
8389                 bnx2x_update_coalesce(bp);
8390
8391         return 0;
8392 }
8393
8394 static void bnx2x_get_ringparam(struct net_device *dev,
8395                                 struct ethtool_ringparam *ering)
8396 {
8397         struct bnx2x *bp = netdev_priv(dev);
8398
8399         ering->rx_max_pending = MAX_RX_AVAIL;
8400         ering->rx_mini_max_pending = 0;
8401         ering->rx_jumbo_max_pending = 0;
8402
8403         ering->rx_pending = bp->rx_ring_size;
8404         ering->rx_mini_pending = 0;
8405         ering->rx_jumbo_pending = 0;
8406
8407         ering->tx_max_pending = MAX_TX_AVAIL;
8408         ering->tx_pending = bp->tx_ring_size;
8409 }
8410
8411 static int bnx2x_set_ringparam(struct net_device *dev,
8412                                struct ethtool_ringparam *ering)
8413 {
8414         struct bnx2x *bp = netdev_priv(dev);
8415         int rc = 0;
8416
8417         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8418             (ering->tx_pending > MAX_TX_AVAIL) ||
8419             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8420                 return -EINVAL;
8421
8422         bp->rx_ring_size = ering->rx_pending;
8423         bp->tx_ring_size = ering->tx_pending;
8424
8425         if (netif_running(dev)) {
8426                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8427                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8428         }
8429
8430         return rc;
8431 }
8432
8433 static void bnx2x_get_pauseparam(struct net_device *dev,
8434                                  struct ethtool_pauseparam *epause)
8435 {
8436         struct bnx2x *bp = netdev_priv(dev);
8437
8438         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8439                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8440
8441         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8442                             BNX2X_FLOW_CTRL_RX);
8443         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8444                             BNX2X_FLOW_CTRL_TX);
8445
8446         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8447            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8448            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8449 }
8450
8451 static int bnx2x_set_pauseparam(struct net_device *dev,
8452                                 struct ethtool_pauseparam *epause)
8453 {
8454         struct bnx2x *bp = netdev_priv(dev);
8455
8456         if (IS_E1HMF(bp))
8457                 return 0;
8458
8459         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8460            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8461            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8462
8463         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8464
8465         if (epause->rx_pause)
8466                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8467
8468         if (epause->tx_pause)
8469                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8470
8471         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8472                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8473
8474         if (epause->autoneg) {
8475                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8476                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8477                         return -EINVAL;
8478                 }
8479
8480                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8481                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8482         }
8483
8484         DP(NETIF_MSG_LINK,
8485            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8486
8487         if (netif_running(dev)) {
8488                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8489                 bnx2x_link_set(bp);
8490         }
8491
8492         return 0;
8493 }
8494
8495 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8496 {
8497         struct bnx2x *bp = netdev_priv(dev);
8498         int changed = 0;
8499         int rc = 0;
8500
8501         /* TPA requires Rx CSUM offloading */
8502         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8503                 if (!(dev->features & NETIF_F_LRO)) {
8504                         dev->features |= NETIF_F_LRO;
8505                         bp->flags |= TPA_ENABLE_FLAG;
8506                         changed = 1;
8507                 }
8508
8509         } else if (dev->features & NETIF_F_LRO) {
8510                 dev->features &= ~NETIF_F_LRO;
8511                 bp->flags &= ~TPA_ENABLE_FLAG;
8512                 changed = 1;
8513         }
8514
8515         if (changed && netif_running(dev)) {
8516                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8517                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8518         }
8519
8520         return rc;
8521 }
8522
8523 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8524 {
8525         struct bnx2x *bp = netdev_priv(dev);
8526
8527         return bp->rx_csum;
8528 }
8529
8530 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8531 {
8532         struct bnx2x *bp = netdev_priv(dev);
8533         int rc = 0;
8534
8535         bp->rx_csum = data;
8536
8537         /* Disable TPA when Rx CSUM is disabled; otherwise all
8538            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8539         if (!data) {
8540                 u32 flags = ethtool_op_get_flags(dev);
8541
8542                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8543         }
8544
8545         return rc;
8546 }
8547
8548 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8549 {
8550         if (data) {
8551                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8552                 dev->features |= NETIF_F_TSO6;
8553         } else {
8554                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8555                 dev->features &= ~NETIF_F_TSO6;
8556         }
8557
8558         return 0;
8559 }
8560
8561 static const struct {
8562         char string[ETH_GSTRING_LEN];
8563 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8564         { "register_test (offline)" },
8565         { "memory_test (offline)" },
8566         { "loopback_test (offline)" },
8567         { "nvram_test (online)" },
8568         { "interrupt_test (online)" },
8569         { "link_test (online)" },
8570         { "idle check (online)" },
8571         { "MC errors (online)" }
8572 };
8573
8574 static int bnx2x_self_test_count(struct net_device *dev)
8575 {
8576         return BNX2X_NUM_TESTS;
8577 }
8578
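/* Offline register test: write 0 and then 0xffffffff to each entry at
 * offset0 + port*offset1, compare the masked read-back against what
 * was written and restore the original value.
 */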
8579 static int bnx2x_test_registers(struct bnx2x *bp)
8580 {
8581         int idx, i, rc = -ENODEV;
8582         u32 wr_val = 0;
8583         int port = BP_PORT(bp);
8584         static const struct {
8585                 u32  offset0;
8586                 u32  offset1;
8587                 u32  mask;
8588         } reg_tbl[] = {
8589 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8590                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8591                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8592                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8593                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8594                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8595                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8596                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8597                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8598                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8599 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8600                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8601                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8602                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8603                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8604                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8605                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8606                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8607                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8608                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8609 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8610                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8611                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8612                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8613                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8614                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8615                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8616                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8617                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8618                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8619 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8620                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8621                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8622                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8623                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8624                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8625                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8626                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8627
8628                 { 0xffffffff, 0, 0x00000000 }
8629         };
8630
8631         if (!netif_running(bp->dev))
8632                 return rc;
8633
8634         /* Repeat the test twice:
8635            First by writing 0x00000000, second by writing 0xffffffff */
8636         for (idx = 0; idx < 2; idx++) {
8637
8638                 switch (idx) {
8639                 case 0:
8640                         wr_val = 0;
8641                         break;
8642                 case 1:
8643                         wr_val = 0xffffffff;
8644                         break;
8645                 }
8646
8647                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8648                         u32 offset, mask, save_val, val;
8649
8650                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8651                         mask = reg_tbl[i].mask;
8652
8653                         save_val = REG_RD(bp, offset);
8654
8655                         REG_WR(bp, offset, wr_val);
8656                         val = REG_RD(bp, offset);
8657
8658                         /* Restore the original register's value */
8659                         REG_WR(bp, offset, save_val);
8660
8661                         /* verify the value read back is as expected */
8662                         if ((val & mask) != (wr_val & mask))
8663                                 goto test_reg_exit;
8664                 }
8665         }
8666
8667         rc = 0;
8668
8669 test_reg_exit:
8670         return rc;
8671 }
8672
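/* Offline memory test: read-walk the internal memories, then verify
 * that no unexpected parity bits are latched (per-chip-rev masks are
 * applied to the parity status registers).
 */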
8673 static int bnx2x_test_memory(struct bnx2x *bp)
8674 {
8675         int i, j, rc = -ENODEV;
8676         u32 val;
8677         static const struct {
8678                 u32 offset;
8679                 int size;
8680         } mem_tbl[] = {
8681                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8682                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8683                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8684                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8685                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8686                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8687                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8688
8689                 { 0xffffffff, 0 }
8690         };
8691         static const struct {
8692                 char *name;
8693                 u32 offset;
8694                 u32 e1_mask;
8695                 u32 e1h_mask;
8696         } prty_tbl[] = {
8697                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8698                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8699                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8700                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8701                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8702                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8703
8704                 { NULL, 0xffffffff, 0, 0 }
8705         };
8706
8707         if (!netif_running(bp->dev))
8708                 return rc;
8709
8710         /* Go through all the memories */
8711         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8712                 for (j = 0; j < mem_tbl[i].size; j++)
8713                         REG_RD(bp, mem_tbl[i].offset + j*4);
8714
8715         /* Check the parity status */
8716         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8717                 val = REG_RD(bp, prty_tbl[i].offset);
8718                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8719                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8720                         DP(NETIF_MSG_HW,
8721                            "%s is 0x%x\n", prty_tbl[i].name, val);
8722                         goto test_mem_exit;
8723                 }
8724         }
8725
8726         rc = 0;
8727
8728 test_mem_exit:
8729         return rc;
8730 }
8731
8732 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8733 {
8734         int cnt = 1000;
8735
8736         if (link_up)
8737                 while (bnx2x_link_test(bp) && cnt--)
8738                         msleep(10);
8739 }
8740
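/* Run one MAC (BMAC) or PHY (XGXS) loopback iteration: build a
 * 1514-byte frame, post it on queue 0 and verify that exactly one
 * packet comes back through the RCQ with an intact payload.
 */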
8741 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8742 {
8743         unsigned int pkt_size, num_pkts, i;
8744         struct sk_buff *skb;
8745         unsigned char *packet;
8746         struct bnx2x_fastpath *fp = &bp->fp[0];
8747         u16 tx_start_idx, tx_idx;
8748         u16 rx_start_idx, rx_idx;
8749         u16 pkt_prod;
8750         struct sw_tx_bd *tx_buf;
8751         struct eth_tx_bd *tx_bd;
8752         dma_addr_t mapping;
8753         union eth_rx_cqe *cqe;
8754         u8 cqe_fp_flags;
8755         struct sw_rx_bd *rx_buf;
8756         u16 len;
8757         int rc = -ENODEV;
8758
8759         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8760                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8761                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8762
8763         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8764                 u16 cnt = 1000;
8765                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8766                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8767                 /* wait until link state is restored */
8768                 if (link_up)
8769                         while (cnt-- && bnx2x_test_link(&bp->link_params,
8770                                                         &bp->link_vars))
8771                                 msleep(10);
8772         } else
8773                 return -EINVAL;
8774
8775         pkt_size = 1514;
8776         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8777         if (!skb) {
8778                 rc = -ENOMEM;
8779                 goto test_loopback_exit;
8780         }
8781         packet = skb_put(skb, pkt_size);
8782         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8783         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8784         for (i = ETH_HLEN; i < pkt_size; i++)
8785                 packet[i] = (unsigned char) (i & 0xff);
8786
8787         num_pkts = 0;
8788         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8789         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8790
8791         pkt_prod = fp->tx_pkt_prod++;
8792         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8793         tx_buf->first_bd = fp->tx_bd_prod;
8794         tx_buf->skb = skb;
8795
8796         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8797         mapping = pci_map_single(bp->pdev, skb->data,
8798                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8799         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8800         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8801         tx_bd->nbd = cpu_to_le16(1);
8802         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8803         tx_bd->vlan = cpu_to_le16(pkt_prod);
8804         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8805                                        ETH_TX_BD_FLAGS_END_BD);
8806         tx_bd->general_data = ((UNICAST_ADDRESS <<
8807                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8808
8809         wmb();
8810
8811         fp->hw_tx_prods->bds_prod =
8812                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8813         mb(); /* FW restriction: must not reorder writing nbd and packets */
8814         fp->hw_tx_prods->packets_prod =
8815                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8816         DOORBELL(bp, FP_IDX(fp), 0);
8817
8818         mmiowb();
8819
8820         num_pkts++;
8821         fp->tx_bd_prod++;
8822         bp->dev->trans_start = jiffies;
8823
8824         udelay(100);
8825
8826         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8827         if (tx_idx != tx_start_idx + num_pkts)
8828                 goto test_loopback_exit;
8829
8830         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8831         if (rx_idx != rx_start_idx + num_pkts)
8832                 goto test_loopback_exit;
8833
8834         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8835         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8836         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8837                 goto test_loopback_rx_exit;
8838
8839         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8840         if (len != pkt_size)
8841                 goto test_loopback_rx_exit;
8842
8843         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8844         skb = rx_buf->skb;
8845         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8846         for (i = ETH_HLEN; i < pkt_size; i++)
8847                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8848                         goto test_loopback_rx_exit;
8849
8850         rc = 0;
8851
8852 test_loopback_rx_exit:
8853
8854         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8855         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8856         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8857         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8858
8859         /* Update producers */
8860         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8861                              fp->rx_sge_prod);
8862
8863 test_loopback_exit:
8864         bp->link_params.loopback_mode = LOOPBACK_NONE;
8865
8866         return rc;
8867 }
8868
8869 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8870 {
8871         int rc = 0;
8872
8873         if (!netif_running(bp->dev))
8874                 return BNX2X_LOOPBACK_FAILED;
8875
8876         bnx2x_netif_stop(bp, 1);
8877         bnx2x_acquire_phy_lock(bp);
8878
8879         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8880                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8881                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8882         }
8883
8884         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8885                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8886                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8887         }
8888
8889         bnx2x_release_phy_lock(bp);
8890         bnx2x_netif_start(bp);
8891
8892         return rc;
8893 }
8894
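/* standard CRC-32 residue of a block that includes its own
 * little-endian CRC; every valid NVRAM region must check out to it */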
8895 #define CRC32_RESIDUAL                  0xdebb20e3
8896
8897 static int bnx2x_test_nvram(struct bnx2x *bp)
8898 {
8899         static const struct {
8900                 int offset;
8901                 int size;
8902         } nvram_tbl[] = {
8903                 {     0,  0x14 }, /* bootstrap */
8904                 {  0x14,  0xec }, /* dir */
8905                 { 0x100, 0x350 }, /* manuf_info */
8906                 { 0x450,  0xf0 }, /* feature_info */
8907                 { 0x640,  0x64 }, /* upgrade_key_info */
8908                 { 0x6a4,  0x64 },
8909                 { 0x708,  0x70 }, /* manuf_key_info */
8910                 { 0x778,  0x70 },
8911                 {     0,     0 }
8912         };
8913         u32 buf[0x350 / 4];
8914         u8 *data = (u8 *)buf;
8915         int i, rc;
8916         u32 magic, csum;
8917
8918         rc = bnx2x_nvram_read(bp, 0, data, 4);
8919         if (rc) {
8920                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8921                 goto test_nvram_exit;
8922         }
8923
8924         magic = be32_to_cpu(buf[0]);
8925         if (magic != 0x669955aa) {
8926                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8927                 rc = -ENODEV;
8928                 goto test_nvram_exit;
8929         }
8930
8931         for (i = 0; nvram_tbl[i].size; i++) {
8932
8933                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8934                                       nvram_tbl[i].size);
8935                 if (rc) {
8936                         DP(NETIF_MSG_PROBE,
8937                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8938                         goto test_nvram_exit;
8939                 }
8940
8941                 csum = ether_crc_le(nvram_tbl[i].size, data);
8942                 if (csum != CRC32_RESIDUAL) {
8943                         DP(NETIF_MSG_PROBE,
8944                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8945                         rc = -ENODEV;
8946                         goto test_nvram_exit;
8947                 }
8948         }
8949
8950 test_nvram_exit:
8951         return rc;
8952 }
8953
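/* Interrupt test: post a benign set-MAC ramrod and wait for its
 * completion, which proves that slowpath interrupts are delivered.
 */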
8954 static int bnx2x_test_intr(struct bnx2x *bp)
8955 {
8956         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8957         int i, rc;
8958
8959         if (!netif_running(bp->dev))
8960                 return -ENODEV;
8961
8962         config->hdr.length_6b = 0;
8963         if (CHIP_IS_E1(bp))
8964                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8965         else
8966                 config->hdr.offset = BP_FUNC(bp);
8967         config->hdr.client_id = BP_CL_ID(bp);
8968         config->hdr.reserved1 = 0;
8969
8970         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8971                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8972                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8973         if (rc == 0) {
8974                 bp->set_mac_pending++;
8975                 for (i = 0; i < 10; i++) {
8976                         if (!bp->set_mac_pending)
8977                                 break;
8978                         msleep_interruptible(10);
8979                 }
8980                 if (i == 10)
8981                         rc = -ENODEV;
8982         }
8983
8984         return rc;
8985 }
8986
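/* ethtool self-test entry point.  The offline tests (registers,
 * memory, loopback) require reloading the NIC in diagnostic mode and
 * are not supported in E1H multi-function mode.
 */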
8987 static void bnx2x_self_test(struct net_device *dev,
8988                             struct ethtool_test *etest, u64 *buf)
8989 {
8990         struct bnx2x *bp = netdev_priv(dev);
8991
8992         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8993
8994         if (!netif_running(dev))
8995                 return;
8996
8997         /* offline tests are not supported in MF mode */
8998         if (IS_E1HMF(bp))
8999                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9000
9001         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9002                 u8 link_up;
9003
9004                 link_up = bp->link_vars.link_up;
9005                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9006                 bnx2x_nic_load(bp, LOAD_DIAG);
9007                 /* wait until link state is restored */
9008                 bnx2x_wait_for_link(bp, link_up);
9009
9010                 if (bnx2x_test_registers(bp) != 0) {
9011                         buf[0] = 1;
9012                         etest->flags |= ETH_TEST_FL_FAILED;
9013                 }
9014                 if (bnx2x_test_memory(bp) != 0) {
9015                         buf[1] = 1;
9016                         etest->flags |= ETH_TEST_FL_FAILED;
9017                 }
9018                 buf[2] = bnx2x_test_loopback(bp, link_up);
9019                 if (buf[2] != 0)
9020                         etest->flags |= ETH_TEST_FL_FAILED;
9021
9022                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9023                 bnx2x_nic_load(bp, LOAD_NORMAL);
9024                 /* wait until link state is restored */
9025                 bnx2x_wait_for_link(bp, link_up);
9026         }
9027         if (bnx2x_test_nvram(bp) != 0) {
9028                 buf[3] = 1;
9029                 etest->flags |= ETH_TEST_FL_FAILED;
9030         }
9031         if (bnx2x_test_intr(bp) != 0) {
9032                 buf[4] = 1;
9033                 etest->flags |= ETH_TEST_FL_FAILED;
9034         }
9035         if (bp->port.pmf)
9036                 if (bnx2x_link_test(bp) != 0) {
9037                         buf[5] = 1;
9038                         etest->flags |= ETH_TEST_FL_FAILED;
9039                 }
9040         buf[7] = bnx2x_mc_assert(bp);
9041         if (buf[7] != 0)
9042                 etest->flags |= ETH_TEST_FL_FAILED;
9043
9044 #ifdef BNX2X_EXTRA_DEBUG
9045         bnx2x_panic_dump(bp);
9046 #endif
9047 }
9048
9049 static const struct {
9050         long offset;
9051         int size;
9052         u32 flags;
9053 #define STATS_FLAGS_PORT                1
9054 #define STATS_FLAGS_FUNC                2
9055         u8 string[ETH_GSTRING_LEN];
9056 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9057 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9058                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
9059         { STATS_OFFSET32(error_bytes_received_hi),
9060                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9061         { STATS_OFFSET32(total_bytes_transmitted_hi),
9062                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
9063         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9064                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9065         { STATS_OFFSET32(total_unicast_packets_received_hi),
9066                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9067         { STATS_OFFSET32(total_multicast_packets_received_hi),
9068                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9069         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9070                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9071         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9072                                 8, STATS_FLAGS_FUNC, "tx_packets" },
9073         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9074                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9075 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9076                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9077         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9078                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9079         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9080                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9081         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9082                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9083         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9084                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9085         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9086                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9087         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9088                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9089         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9090                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9091         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9092                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9093         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9094                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9095 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9096                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9097         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9098                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9099         { STATS_OFFSET32(jabber_packets_received),
9100                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9101         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9102                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9103         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9104                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9105         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9106                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9107         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9108                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9109         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9110                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9111         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9112                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9113         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9114                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9115 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9116                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9117         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9118                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9119         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9120                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9121         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9122                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9123         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9124                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9125         { STATS_OFFSET32(mac_filter_discard),
9126                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9127         { STATS_OFFSET32(no_buff_discard),
9128                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9129         { STATS_OFFSET32(xxoverflow_discard),
9130                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9131         { STATS_OFFSET32(brb_drop_hi),
9132                                 8, STATS_FLAGS_PORT, "brb_discard" },
9133         { STATS_OFFSET32(brb_truncate_hi),
9134                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9135 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9136                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9137         { STATS_OFFSET32(rx_skb_alloc_failed),
9138                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9139 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9140                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9141 };
9142
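/* port statistics are not available per function in E1H multi-function
 * mode, so they are filtered out of the ethtool views below */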
9143 #define IS_NOT_E1HMF_STAT(bp, i) \
9144                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9145
9146 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9147 {
9148         struct bnx2x *bp = netdev_priv(dev);
9149         int i, j;
9150
9151         switch (stringset) {
9152         case ETH_SS_STATS:
9153                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9154                         if (IS_NOT_E1HMF_STAT(bp, i))
9155                                 continue;
9156                         strcpy(buf + j*ETH_GSTRING_LEN,
9157                                bnx2x_stats_arr[i].string);
9158                         j++;
9159                 }
9160                 break;
9161
9162         case ETH_SS_TEST:
9163                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9164                 break;
9165         }
9166 }
9167
9168 static int bnx2x_get_stats_count(struct net_device *dev)
9169 {
9170         struct bnx2x *bp = netdev_priv(dev);
9171         int i, num_stats = 0;
9172
9173         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9174                 if (IS_NOT_E1HMF_STAT(bp, i))
9175                         continue;
9176                 num_stats++;
9177         }
9178         return num_stats;
9179 }
9180
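/* Copy the selected 32- and 64-bit counters out of bp->eth_stats in
 * the same order as the strings table above.
 */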
9181 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9182                                     struct ethtool_stats *stats, u64 *buf)
9183 {
9184         struct bnx2x *bp = netdev_priv(dev);
9185         u32 *hw_stats = (u32 *)&bp->eth_stats;
9186         int i, j;
9187
9188         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9189                 if (IS_NOT_E1HMF_STAT(bp, i))
9190                         continue;
9191
9192                 if (bnx2x_stats_arr[i].size == 0) {
9193                         /* skip this counter */
9194                         buf[j] = 0;
9195                         j++;
9196                         continue;
9197                 }
9198                 if (bnx2x_stats_arr[i].size == 4) {
9199                         /* 4-byte counter */
9200                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9201                         j++;
9202                         continue;
9203                 }
9204                 /* 8-byte counter */
9205                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9206                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9207                 j++;
9208         }
9209 }
9210
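/* Blink the port LED for identification: 'data' on/off cycles (two by
 * default), then restore the LED to its operational state.
 */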
9211 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9212 {
9213         struct bnx2x *bp = netdev_priv(dev);
9214         int port = BP_PORT(bp);
9215         int i;
9216
9217         if (!netif_running(dev))
9218                 return 0;
9219
9220         if (!bp->port.pmf)
9221                 return 0;
9222
9223         if (data == 0)
9224                 data = 2;
9225
9226         for (i = 0; i < (data * 2); i++) {
9227                 if ((i % 2) == 0)
9228                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9229                                       bp->link_params.hw_led_mode,
9230                                       bp->link_params.chip_id);
9231                 else
9232                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9233                                       bp->link_params.hw_led_mode,
9234                                       bp->link_params.chip_id);
9235
9236                 msleep_interruptible(500);
9237                 if (signal_pending(current))
9238                         break;
9239         }
9240
9241         if (bp->link_vars.link_up)
9242                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9243                               bp->link_vars.line_speed,
9244                               bp->link_params.hw_led_mode,
9245                               bp->link_params.chip_id);
9246
9247         return 0;
9248 }
9249
9250 static struct ethtool_ops bnx2x_ethtool_ops = {
9251         .get_settings           = bnx2x_get_settings,
9252         .set_settings           = bnx2x_set_settings,
9253         .get_drvinfo            = bnx2x_get_drvinfo,
9254         .get_wol                = bnx2x_get_wol,
9255         .set_wol                = bnx2x_set_wol,
9256         .get_msglevel           = bnx2x_get_msglevel,
9257         .set_msglevel           = bnx2x_set_msglevel,
9258         .nway_reset             = bnx2x_nway_reset,
9259         .get_link               = ethtool_op_get_link,
9260         .get_eeprom_len         = bnx2x_get_eeprom_len,
9261         .get_eeprom             = bnx2x_get_eeprom,
9262         .set_eeprom             = bnx2x_set_eeprom,
9263         .get_coalesce           = bnx2x_get_coalesce,
9264         .set_coalesce           = bnx2x_set_coalesce,
9265         .get_ringparam          = bnx2x_get_ringparam,
9266         .set_ringparam          = bnx2x_set_ringparam,
9267         .get_pauseparam         = bnx2x_get_pauseparam,
9268         .set_pauseparam         = bnx2x_set_pauseparam,
9269         .get_rx_csum            = bnx2x_get_rx_csum,
9270         .set_rx_csum            = bnx2x_set_rx_csum,
9271         .get_tx_csum            = ethtool_op_get_tx_csum,
9272         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9273         .set_flags              = bnx2x_set_flags,
9274         .get_flags              = ethtool_op_get_flags,
9275         .get_sg                 = ethtool_op_get_sg,
9276         .set_sg                 = ethtool_op_set_sg,
9277         .get_tso                = ethtool_op_get_tso,
9278         .set_tso                = bnx2x_set_tso,
9279         .self_test_count        = bnx2x_self_test_count,
9280         .self_test              = bnx2x_self_test,
9281         .get_strings            = bnx2x_get_strings,
9282         .phys_id                = bnx2x_phys_id,
9283         .get_stats_count        = bnx2x_get_stats_count,
9284         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9285 };
9286
9287 /* end of ethtool_ops */
9288
9289 /****************************************************************************
9290 * General service functions
9291 ****************************************************************************/
9292
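/* Move the device between D0 and D3hot through the PCI PM control
 * register; PME generation is armed on the way down when WoL is
 * enabled.
 */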
9293 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9294 {
9295         u16 pmcsr;
9296
9297         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9298
9299         switch (state) {
9300         case PCI_D0:
9301                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9302                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9303                                        PCI_PM_CTRL_PME_STATUS));
9304
9305                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9306                         /* delay required during transition out of D3hot */
9307                         msleep(20);
9308                 break;
9309
9310         case PCI_D3hot:
9311                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9312                 pmcsr |= 3;
9313
9314                 if (bp->wol)
9315                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9316
9317                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9318                                       pmcsr);
9319
9320                 /* No more memory access after this point until
9321                  * device is brought back to D0.
9322                  */
9323                 break;
9324
9325         default:
9326                 return -EINVAL;
9327         }
9328         return 0;
9329 }
9330
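/* True when the RCQ consumer has not caught up with the status block
 * index; the index is bumped over the last element of a page, which
 * is reserved for the next-page pointer rather than a completion.
 */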
9331 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9332 {
9333         u16 rx_cons_sb;
9334
9335         /* Tell compiler that status block fields can change */
9336         barrier();
9337         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9338         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9339                 rx_cons_sb++;
9340         return (fp->rx_comp_cons != rx_cons_sb);
9341 }
9342
9343 /*
9344  * net_device service functions
9345  */
9346
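/* NAPI poll: service TX completions and up to 'budget' RX packets on
 * this fastpath, and re-enable the status block interrupt only when
 * all work is done.
 */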
9347 static int bnx2x_poll(struct napi_struct *napi, int budget)
9348 {
9349         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9350                                                  napi);
9351         struct bnx2x *bp = fp->bp;
9352         int work_done = 0;
9353
9354 #ifdef BNX2X_STOP_ON_ERROR
9355         if (unlikely(bp->panic))
9356                 goto poll_panic;
9357 #endif
9358
9359         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9360         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9361         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9362
9363         bnx2x_update_fpsb_idx(fp);
9364
9365         if (bnx2x_has_tx_work(fp))
9366                 bnx2x_tx_int(fp, budget);
9367
9368         if (bnx2x_has_rx_work(fp))
9369                 work_done = bnx2x_rx_int(fp, budget);
9370         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9371
9372         /* must not complete if we consumed full budget */
9373         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9374
9375 #ifdef BNX2X_STOP_ON_ERROR
9376 poll_panic:
9377 #endif
9378                 napi_complete(napi);
9379
9380                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9381                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9382                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9383                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9384         }
9385         return work_done;
9386 }
9387
9388
9389 /* We split the first BD into header and data BDs
9390  * to ease the pain of our fellow microcode engineers;
9391  * we use one mapping for both BDs.
9392  * So far this has only been observed to happen
9393  * in Other Operating Systems(TM).
9394  */
9395 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9396                                    struct bnx2x_fastpath *fp,
9397                                    struct eth_tx_bd **tx_bd, u16 hlen,
9398                                    u16 bd_prod, int nbd)
9399 {
9400         struct eth_tx_bd *h_tx_bd = *tx_bd;
9401         struct eth_tx_bd *d_tx_bd;
9402         dma_addr_t mapping;
9403         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9404
9405         /* first fix first BD */
9406         h_tx_bd->nbd = cpu_to_le16(nbd);
9407         h_tx_bd->nbytes = cpu_to_le16(hlen);
9408
9409         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9410            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9411            h_tx_bd->addr_lo, h_tx_bd->nbd);
9412
9413         /* now get a new data BD
9414          * (after the pbd) and fill it */
9415         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9416         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9417
9418         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9419                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9420
9421         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9422         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9423         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9424         d_tx_bd->vlan = 0;
9425         /* this marks the BD as one that has no individual mapping;
9426          * the FW ignores this flag in a BD not marked start
9427          */
9428         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9429         DP(NETIF_MSG_TX_QUEUED,
9430            "TSO split data size is %d (%x:%x)\n",
9431            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9432
9433         /* update tx_bd for marking the last BD flag */
9434         *tx_bd = d_tx_bd;
9435
9436         return bd_prod;
9437 }
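
/* Worked example for bnx2x_tx_split() (illustrative numbers only):
 * with hlen = 66 and an original 1500-byte first BD mapped at DMA
 * address A, the header BD is trimmed to [A, A + 66) and the new data
 * BD covers [A + 66, A + 1500), i.e. nbytes = 1434 - both BDs share
 * the single mapping taken for the linear part of the skb.
 */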
9438
9439 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9440 {
9441         if (fix > 0)
9442                 csum = (u16) ~csum_fold(csum_sub(csum,
9443                                 csum_partial(t_header - fix, fix, 0)));
9444
9445         else if (fix < 0)
9446                 csum = (u16) ~csum_fold(csum_add(csum,
9447                                 csum_partial(t_header, -fix, 0)));
9448
9449         return swab16(csum);
9450 }
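
/* bnx2x_csum_fix() adjusts a checksum that was computed over a window
 * shifted by 'fix' bytes relative to the transport header: for fix > 0
 * the extra leading bytes are subtracted back out, for fix < 0 the
 * missed bytes are added in; the result is folded, complemented and
 * byte-swapped into the format the parsing BD expects.
 */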
9451
9452 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9453 {
9454         u32 rc;
9455
9456         if (skb->ip_summed != CHECKSUM_PARTIAL)
9457                 rc = XMIT_PLAIN;
9458
9459         else {
9460                 if (skb->protocol == htons(ETH_P_IPV6)) {
9461                         rc = XMIT_CSUM_V6;
9462                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9463                                 rc |= XMIT_CSUM_TCP;
9464
9465                 } else {
9466                         rc = XMIT_CSUM_V4;
9467                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9468                                 rc |= XMIT_CSUM_TCP;
9469                 }
9470         }
9471
9472         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9473                 rc |= XMIT_GSO_V4;
9474
9475         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9476                 rc |= XMIT_GSO_V6;
9477
9478         return rc;
9479 }
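
/* e.g. a TSO IPv4 packet with CHECKSUM_PARTIAL yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a frame without
 * checksum offload maps to XMIT_PLAIN.
 */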
9480
9481 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9482 /* check if packet requires linearization (packet is too fragmented) */
9483 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9484                              u32 xmit_type)
9485 {
9486         int to_copy = 0;
9487         int hlen = 0;
9488         int first_bd_sz = 0;
9489
9490         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9491         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9492
9493                 if (xmit_type & XMIT_GSO) {
9494                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9495                         /* Check if LSO packet needs to be copied:
9496                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9497                         int wnd_size = MAX_FETCH_BD - 3;
9498                         /* Number of windows to check */
9499                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9500                         int wnd_idx = 0;
9501                         int frag_idx = 0;
9502                         u32 wnd_sum = 0;
9503
9504                         /* Headers length */
9505                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9506                                 tcp_hdrlen(skb);
9507
9508                         /* Amount of data (w/o headers) on linear part of SKB */
9509                         first_bd_sz = skb_headlen(skb) - hlen;
9510
9511                         wnd_sum  = first_bd_sz;
9512
9513                         /* Calculate the first sum - it's special */
9514                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9515                                 wnd_sum +=
9516                                         skb_shinfo(skb)->frags[frag_idx].size;
9517
9518                         /* If there was data on linear skb data - check it */
9519                         if (first_bd_sz > 0) {
9520                                 if (unlikely(wnd_sum < lso_mss)) {
9521                                         to_copy = 1;
9522                                         goto exit_lbl;
9523                                 }
9524
9525                                 wnd_sum -= first_bd_sz;
9526                         }
9527
9528                         /* Others are easier: run through the frag list and
9529                            check all windows */
9530                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9531                                 wnd_sum +=
9532                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9533
9534                                 if (unlikely(wnd_sum < lso_mss)) {
9535                                         to_copy = 1;
9536                                         break;
9537                                 }
9538                                 wnd_sum -=
9539                                         skb_shinfo(skb)->frags[wnd_idx].size;
9540                         }
9541
9542                 } else {
9543                         /* a non-LSO packet that is too fragmented
9544                            must always be linearized */
9545                         to_copy = 1;
9546                 }
9547         }
9548
9549 exit_lbl:
9550         if (unlikely(to_copy))
9551                 DP(NETIF_MSG_TX_QUEUED,
9552                    "Linearization IS REQUIRED for %s packet. "
9553                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9554                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9555                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9556
9557         return to_copy;
9558 }
9559 #endif
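
/* The sliding-window check above mirrors a FW restriction: every
 * window of (MAX_FETCH_BD - 3) consecutive BDs must carry at least one
 * full MSS of payload, otherwise the FW cannot fetch a complete LSO
 * segment and the skb is linearized first. E.g. (illustrative numbers
 * only) with a 10-frag window and lso_mss = 1460, any window summing
 * to less than 1460 bytes forces a copy.
 */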
9560
9561 /* called with netif_tx_lock
9562  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9563  * netif_wake_queue()
9564  */
9565 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9566 {
9567         struct bnx2x *bp = netdev_priv(dev);
9568         struct bnx2x_fastpath *fp;
9569         struct sw_tx_bd *tx_buf;
9570         struct eth_tx_bd *tx_bd;
9571         struct eth_tx_parse_bd *pbd = NULL;
9572         u16 pkt_prod, bd_prod;
9573         int nbd, fp_index;
9574         dma_addr_t mapping;
9575         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9576         int vlan_off = (bp->e1hov ? 4 : 0);
9577         int i;
9578         u8 hlen = 0;
9579
9580 #ifdef BNX2X_STOP_ON_ERROR
9581         if (unlikely(bp->panic))
9582                 return NETDEV_TX_BUSY;
9583 #endif
9584
9585         fp_index = (smp_processor_id() % bp->num_queues);
9586         fp = &bp->fp[fp_index];
9587
9588         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9589                 bp->eth_stats.driver_xoff++;
9590                 netif_stop_queue(dev);
9591                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9592                 return NETDEV_TX_BUSY;
9593         }
9594
9595         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9596            "  gso type %x  xmit_type %x\n",
9597            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9598            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9599
9600 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9601         /* First, check if we need to linearize the skb
9602            (due to FW restrictions) */
9603         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9604                 /* Statistics of linearization */
9605                 bp->lin_cnt++;
9606                 if (skb_linearize(skb) != 0) {
9607                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9608                            "silently dropping this SKB\n");
9609                         dev_kfree_skb_any(skb);
9610                         return NETDEV_TX_OK;
9611                 }
9612         }
9613 #endif
9614
9615         /*
9616          * Please read carefully. First we use one BD which we mark as
9617          * start, then for TSO or checksum offload we have a parsing info
9618          * BD, and only then the rest of the TSO BDs.
9619          * (don't forget to mark the last one as last,
9620          * and to unmap only AFTER you write to the BD ...)
9621          * And above all, all pbd sizes are in words - NOT DWORDS!
9622          */
9623
9624         pkt_prod = fp->tx_pkt_prod++;
9625         bd_prod = TX_BD(fp->tx_bd_prod);
9626
9627         /* get a tx_buf and first BD */
9628         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9629         tx_bd = &fp->tx_desc_ring[bd_prod];
9630
9631         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9632         tx_bd->general_data = (UNICAST_ADDRESS <<
9633                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9634         /* header nbd */
9635         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9636
9637         /* remember the first BD of the packet */
9638         tx_buf->first_bd = fp->tx_bd_prod;
9639         tx_buf->skb = skb;
9640
9641         DP(NETIF_MSG_TX_QUEUED,
9642            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9643            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9644
9645 #ifdef BCM_VLAN
9646         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9647             (bp->flags & HW_VLAN_TX_FLAG)) {
9648                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9649                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9650                 vlan_off += 4;
9651         } else
9652 #endif
9653                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9654
9655         if (xmit_type) {
9656                 /* turn on parsing and get a BD */
9657                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9658                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9659
9660                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9661         }
9662
9663         if (xmit_type & XMIT_CSUM) {
9664                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9665
9666                 /* for now NS flag is not used in Linux */
9667                 pbd->global_data = (hlen |
9668                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9669                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9670
9671                 pbd->ip_hlen = (skb_transport_header(skb) -
9672                                 skb_network_header(skb)) / 2;
9673
9674                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9675
9676                 pbd->total_hlen = cpu_to_le16(hlen);
9677                 hlen = hlen*2 - vlan_off;
9678
9679                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9680
9681                 if (xmit_type & XMIT_CSUM_V4)
9682                         tx_bd->bd_flags.as_bitfield |=
9683                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9684                 else
9685                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9686
9687                 if (xmit_type & XMIT_CSUM_TCP) {
9688                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9689
9690                 } else {
9691                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9692
9693                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9694                         pbd->cs_offset = fix / 2;
9695
9696                         DP(NETIF_MSG_TX_QUEUED,
9697                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9698                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9699                            SKB_CS(skb));
9700
9701                         /* HW bug: fixup the CSUM */
9702                         pbd->tcp_pseudo_csum =
9703                                 bnx2x_csum_fix(skb_transport_header(skb),
9704                                                SKB_CS(skb), fix);
9705
9706                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9707                            pbd->tcp_pseudo_csum);
9708                 }
9709         }
9710
9711         mapping = pci_map_single(bp->pdev, skb->data,
9712                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9713
9714         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9715         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
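        /* BD budget: one start BD, plus a parsing BD when any offload
         * is in use, plus one BD per page fragment; bnx2x_tx_split()
         * may add one more below for TSO.
         */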
9716         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9717         tx_bd->nbd = cpu_to_le16(nbd);
9718         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9719
9720         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9721            "  nbytes %d  flags %x  vlan %x\n",
9722            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9723            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9724            le16_to_cpu(tx_bd->vlan));
9725
9726         if (xmit_type & XMIT_GSO) {
9727
9728                 DP(NETIF_MSG_TX_QUEUED,
9729                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9730                    skb->len, hlen, skb_headlen(skb),
9731                    skb_shinfo(skb)->gso_size);
9732
9733                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9734
9735                 if (unlikely(skb_headlen(skb) > hlen))
9736                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9737                                                  bd_prod, ++nbd);
9738
9739                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9740                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9741                 pbd->tcp_flags = pbd_tcp_flags(skb);
9742
9743                 if (xmit_type & XMIT_GSO_V4) {
9744                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9745                         pbd->tcp_pseudo_csum =
9746                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9747                                                           ip_hdr(skb)->daddr,
9748                                                           0, IPPROTO_TCP, 0));
9749
9750                 } else
9751                         pbd->tcp_pseudo_csum =
9752                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9753                                                         &ipv6_hdr(skb)->daddr,
9754                                                         0, IPPROTO_TCP, 0));
9755
9756                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9757         }
9758
9759         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9760                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9761
9762                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9763                 tx_bd = &fp->tx_desc_ring[bd_prod];
9764
9765                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9766                                        frag->size, PCI_DMA_TODEVICE);
9767
9768                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9769                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9770                 tx_bd->nbytes = cpu_to_le16(frag->size);
9771                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9772                 tx_bd->bd_flags.as_bitfield = 0;
9773
9774                 DP(NETIF_MSG_TX_QUEUED,
9775                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9776                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9777                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9778         }
9779
9780         /* now at last mark the BD as the last BD */
9781         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9782
9783         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9784            tx_bd, tx_bd->bd_flags.as_bitfield);
9785
9786         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9787
9788         /* now send a tx doorbell, counting the next BD
9789          * if the packet contains or ends with it
9790          */
9791         if (TX_BD_POFF(bd_prod) < nbd)
9792                 nbd++;
9793
9794         if (pbd)
9795                 DP(NETIF_MSG_TX_QUEUED,
9796                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9797                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9798                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9799                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9800                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9801
9802         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9803
9804         /*
9805          * Make sure that the BD data is updated before updating the producer
9806          * since FW might read the BD right after the producer is updated.
9807          * This is only applicable for weak-ordered memory model archs such
9808          * as IA-64. The following barrier is also mandatory since the FW
9809          * assumes packets must have BDs.
9810          */
9811         wmb();
9812
9813         fp->hw_tx_prods->bds_prod =
9814                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9815         mb(); /* FW restriction: must not reorder writing nbd and packets */
9816         fp->hw_tx_prods->packets_prod =
9817                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9818         DOORBELL(bp, FP_IDX(fp), 0);
9819
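        /* On weakly ordered platforms (e.g. sn2/ia64) mmiowb() keeps
         * the doorbell write ordered ahead of MMIO issued by another
         * CPU that takes the tx lock next; it is a no-op elsewhere.
         */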
9820         mmiowb();
9821
9822         fp->tx_bd_prod += nbd;
9823         dev->trans_start = jiffies;
9824
9825         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9826                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9827                    if we put Tx into XOFF state. */
9828                 smp_mb();
9829                 netif_stop_queue(dev);
9830                 bp->eth_stats.driver_xoff++;
9831                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9832                         netif_wake_queue(dev);
9833         }
9834         fp->tx_pkt++;
9835
9836         return NETDEV_TX_OK;
9837 }
9838
9839 /* called with rtnl_lock */
9840 static int bnx2x_open(struct net_device *dev)
9841 {
9842         struct bnx2x *bp = netdev_priv(dev);
9843
9844         netif_carrier_off(dev);
9845
9846         bnx2x_set_power_state(bp, PCI_D0);
9847
9848         return bnx2x_nic_load(bp, LOAD_OPEN);
9849 }
9850
9851 /* called with rtnl_lock */
9852 static int bnx2x_close(struct net_device *dev)
9853 {
9854         struct bnx2x *bp = netdev_priv(dev);
9855
9856         /* Unload the driver, release IRQs */
9857         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9858         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9859                 if (!CHIP_REV_IS_SLOW(bp))
9860                         bnx2x_set_power_state(bp, PCI_D3hot);
9861
9862         return 0;
9863 }
9864
9865 /* called with netif_tx_lock from set_multicast */
9866 static void bnx2x_set_rx_mode(struct net_device *dev)
9867 {
9868         struct bnx2x *bp = netdev_priv(dev);
9869         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9870         int port = BP_PORT(bp);
9871
9872         if (bp->state != BNX2X_STATE_OPEN) {
9873                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9874                 return;
9875         }
9876
9877         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9878
9879         if (dev->flags & IFF_PROMISC)
9880                 rx_mode = BNX2X_RX_MODE_PROMISC;
9881
9882         else if ((dev->flags & IFF_ALLMULTI) ||
9883                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9884                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9885
9886         else { /* some multicasts */
9887                 if (CHIP_IS_E1(bp)) {
9888                         int i, old, offset;
9889                         struct dev_mc_list *mclist;
9890                         struct mac_configuration_cmd *config =
9891                                                 bnx2x_sp(bp, mcast_config);
9892
9893                         for (i = 0, mclist = dev->mc_list;
9894                              mclist && (i < dev->mc_count);
9895                              i++, mclist = mclist->next) {
9896
9897                                 config->config_table[i].
9898                                         cam_entry.msb_mac_addr =
9899                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9900                                 config->config_table[i].
9901                                         cam_entry.middle_mac_addr =
9902                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9903                                 config->config_table[i].
9904                                         cam_entry.lsb_mac_addr =
9905                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9906                                 config->config_table[i].cam_entry.flags =
9907                                                         cpu_to_le16(port);
9908                                 config->config_table[i].
9909                                         target_table_entry.flags = 0;
9910                                 config->config_table[i].
9911                                         target_table_entry.client_id = 0;
9912                                 config->config_table[i].
9913                                         target_table_entry.vlan_id = 0;
9914
9915                                 DP(NETIF_MSG_IFUP,
9916                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9917                                    config->config_table[i].
9918                                                 cam_entry.msb_mac_addr,
9919                                    config->config_table[i].
9920                                                 cam_entry.middle_mac_addr,
9921                                    config->config_table[i].
9922                                                 cam_entry.lsb_mac_addr);
9923                         }
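                        /* the new list is shorter: invalidate any CAM
                         * entries left over from the previous one */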
9924                         old = config->hdr.length_6b;
9925                         if (old > i) {
9926                                 for (; i < old; i++) {
9927                                         if (CAM_IS_INVALID(config->
9928                                                            config_table[i])) {
9929                                                 /* already invalidated */
9930                                                 break;
9931                                         }
9932                                         /* invalidate */
9933                                         CAM_INVALIDATE(config->
9934                                                        config_table[i]);
9935                                 }
9936                         }
9937
9938                         if (CHIP_REV_IS_SLOW(bp))
9939                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9940                         else
9941                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9942
9943                         config->hdr.length_6b = i;
9944                         config->hdr.offset = offset;
9945                         config->hdr.client_id = BP_CL_ID(bp);
9946                         config->hdr.reserved1 = 0;
9947
9948                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9949                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9950                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9951                                       0);
9952                 } else { /* E1H */
9953                         /* Accept one or more multicasts */
9954                         struct dev_mc_list *mclist;
9955                         u32 mc_filter[MC_HASH_SIZE];
9956                         u32 crc, bit, regidx;
9957                         int i;
9958
9959                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9960
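                        /* E1H approximate multicast: hash each address
                         * with crc32c, use the top byte as a bit index
                         * into MC_HASH_SIZE 32-bit filter registers,
                         * and program the registers below.
                         */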
9961                         for (i = 0, mclist = dev->mc_list;
9962                              mclist && (i < dev->mc_count);
9963                              i++, mclist = mclist->next) {
9964
9965                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9966                                    mclist->dmi_addr);
9967
9968                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9969                                 bit = (crc >> 24) & 0xff;
9970                                 regidx = bit >> 5;
9971                                 bit &= 0x1f;
9972                                 mc_filter[regidx] |= (1 << bit);
9973                         }
9974
9975                         for (i = 0; i < MC_HASH_SIZE; i++)
9976                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9977                                        mc_filter[i]);
9978                 }
9979         }
9980
9981         bp->rx_mode = rx_mode;
9982         bnx2x_set_storm_rx_mode(bp);
9983 }
9984
9985 /* called with rtnl_lock */
9986 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9987 {
9988         struct sockaddr *addr = p;
9989         struct bnx2x *bp = netdev_priv(dev);
9990
9991         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9992                 return -EINVAL;
9993
9994         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9995         if (netif_running(dev)) {
9996                 if (CHIP_IS_E1(bp))
9997                         bnx2x_set_mac_addr_e1(bp, 1);
9998                 else
9999                         bnx2x_set_mac_addr_e1h(bp, 1);
10000         }
10001
10002         return 0;
10003 }
10004
10005 /* called with rtnl_lock */
10006 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10007 {
10008         struct mii_ioctl_data *data = if_mii(ifr);
10009         struct bnx2x *bp = netdev_priv(dev);
10010         int port = BP_PORT(bp);
10011         int err;
10012
10013         switch (cmd) {
10014         case SIOCGMIIPHY:
10015                 data->phy_id = bp->port.phy_addr;
10016
10017                 /* fallthrough */
10018
10019         case SIOCGMIIREG: {
10020                 u16 mii_regval;
10021
10022                 if (!netif_running(dev))
10023                         return -EAGAIN;
10024
10025                 mutex_lock(&bp->port.phy_mutex);
10026                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10027                                       DEFAULT_PHY_DEV_ADDR,
10028                                       (data->reg_num & 0x1f), &mii_regval);
10029                 data->val_out = mii_regval;
10030                 mutex_unlock(&bp->port.phy_mutex);
10031                 return err;
10032         }
10033
10034         case SIOCSMIIREG:
10035                 if (!capable(CAP_NET_ADMIN))
10036                         return -EPERM;
10037
10038                 if (!netif_running(dev))
10039                         return -EAGAIN;
10040
10041                 mutex_lock(&bp->port.phy_mutex);
10042                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10043                                        DEFAULT_PHY_DEV_ADDR,
10044                                        (data->reg_num & 0x1f), data->val_in);
10045                 mutex_unlock(&bp->port.phy_mutex);
10046                 return err;
10047
10048         default:
10049                 /* do nothing */
10050                 break;
10051         }
10052
10053         return -EOPNOTSUPP;
10054 }
10055
10056 /* called with rtnl_lock */
10057 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10058 {
10059         struct bnx2x *bp = netdev_priv(dev);
10060         int rc = 0;
10061
10062         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10063             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10064                 return -EINVAL;
10065
10066         /* This does not race with packet allocation
10067          * because the actual alloc size is
10068          * only updated as part of load
10069          */
10070         dev->mtu = new_mtu;
10071
10072         if (netif_running(dev)) {
10073                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10074                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10075         }
10076
10077         return rc;
10078 }
10079
10080 static void bnx2x_tx_timeout(struct net_device *dev)
10081 {
10082         struct bnx2x *bp = netdev_priv(dev);
10083
10084 #ifdef BNX2X_STOP_ON_ERROR
10085         if (!bp->panic)
10086                 bnx2x_panic();
10087 #endif
10088         /* This allows the netif to be shut down gracefully before resetting */
10089         schedule_work(&bp->reset_task);
10090 }
10091
10092 #ifdef BCM_VLAN
10093 /* called with rtnl_lock */
10094 static void bnx2x_vlan_rx_register(struct net_device *dev,
10095                                    struct vlan_group *vlgrp)
10096 {
10097         struct bnx2x *bp = netdev_priv(dev);
10098
10099         bp->vlgrp = vlgrp;
10100
10101         /* Set flags according to the required capabilities */
10102         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10103
10104         if (dev->features & NETIF_F_HW_VLAN_TX)
10105                 bp->flags |= HW_VLAN_TX_FLAG;
10106
10107         if (dev->features & NETIF_F_HW_VLAN_RX)
10108                 bp->flags |= HW_VLAN_RX_FLAG;
10109
10110         if (netif_running(dev))
10111                 bnx2x_set_client_config(bp);
10112 }
10113
10114 #endif
10115
10116 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10117 static void poll_bnx2x(struct net_device *dev)
10118 {
10119         struct bnx2x *bp = netdev_priv(dev);
10120
10121         disable_irq(bp->pdev->irq);
10122         bnx2x_interrupt(bp->pdev->irq, dev);
10123         enable_irq(bp->pdev->irq);
10124 }
10125 #endif
10126
10127 static const struct net_device_ops bnx2x_netdev_ops = {
10128         .ndo_open               = bnx2x_open,
10129         .ndo_stop               = bnx2x_close,
10130         .ndo_start_xmit         = bnx2x_start_xmit,
10131         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10132         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10133         .ndo_validate_addr      = eth_validate_addr,
10134         .ndo_do_ioctl           = bnx2x_ioctl,
10135         .ndo_change_mtu         = bnx2x_change_mtu,
10136         .ndo_tx_timeout         = bnx2x_tx_timeout,
10137 #ifdef BCM_VLAN
10138         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10139 #endif
10140 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10141         .ndo_poll_controller    = poll_bnx2x,
10142 #endif
10143 };
10144
10145
10146 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10147                                     struct net_device *dev)
10148 {
10149         struct bnx2x *bp;
10150         int rc;
10151
10152         SET_NETDEV_DEV(dev, &pdev->dev);
10153         bp = netdev_priv(dev);
10154
10155         bp->dev = dev;
10156         bp->pdev = pdev;
10157         bp->flags = 0;
10158         bp->func = PCI_FUNC(pdev->devfn);
10159
10160         rc = pci_enable_device(pdev);
10161         if (rc) {
10162                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10163                 goto err_out;
10164         }
10165
10166         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10167                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10168                        " aborting\n");
10169                 rc = -ENODEV;
10170                 goto err_out_disable;
10171         }
10172
10173         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10174                 printk(KERN_ERR PFX "Cannot find second PCI device"
10175                        " base address, aborting\n");
10176                 rc = -ENODEV;
10177                 goto err_out_disable;
10178         }
10179
10180         if (atomic_read(&pdev->enable_cnt) == 1) {
10181                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10182                 if (rc) {
10183                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10184                                " aborting\n");
10185                         goto err_out_disable;
10186                 }
10187
10188                 pci_set_master(pdev);
10189                 pci_save_state(pdev);
10190         }
10191
10192         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10193         if (bp->pm_cap == 0) {
10194                 printk(KERN_ERR PFX "Cannot find power management"
10195                        " capability, aborting\n");
10196                 rc = -EIO;
10197                 goto err_out_release;
10198         }
10199
10200         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10201         if (bp->pcie_cap == 0) {
10202                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10203                        " aborting\n");
10204                 rc = -EIO;
10205                 goto err_out_release;
10206         }
10207
10208         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10209                 bp->flags |= USING_DAC_FLAG;
10210                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10211                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10212                                " failed, aborting\n");
10213                         rc = -EIO;
10214                         goto err_out_release;
10215                 }
10216
10217         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10218                 printk(KERN_ERR PFX "System does not support DMA,"
10219                        " aborting\n");
10220                 rc = -EIO;
10221                 goto err_out_release;
10222         }
10223
10224         dev->mem_start = pci_resource_start(pdev, 0);
10225         dev->base_addr = dev->mem_start;
10226         dev->mem_end = pci_resource_end(pdev, 0);
10227
10228         dev->irq = pdev->irq;
10229
10230         bp->regview = pci_ioremap_bar(pdev, 0);
10231         if (!bp->regview) {
10232                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10233                 rc = -ENOMEM;
10234                 goto err_out_release;
10235         }
10236
10237         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10238                                         min_t(u64, BNX2X_DB_SIZE,
10239                                               pci_resource_len(pdev, 2)));
10240         if (!bp->doorbells) {
10241                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10242                 rc = -ENOMEM;
10243                 goto err_out_unmap;
10244         }
10245
10246         bnx2x_set_power_state(bp, PCI_D0);
10247
10248         /* clean indirect addresses */
10249         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10250                                PCICFG_VENDOR_ID_OFFSET);
10251         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10252         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10253         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10254         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10255
10256         dev->watchdog_timeo = TX_TIMEOUT;
10257
10258         dev->netdev_ops = &bnx2x_netdev_ops;
10259         dev->ethtool_ops = &bnx2x_ethtool_ops;
10260         dev->features |= NETIF_F_SG;
10261         dev->features |= NETIF_F_HW_CSUM;
10262         if (bp->flags & USING_DAC_FLAG)
10263                 dev->features |= NETIF_F_HIGHDMA;
10264 #ifdef BCM_VLAN
10265         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10266         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10267 #endif
10268         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10269         dev->features |= NETIF_F_TSO6;
10270
10271         return 0;
10272
10273 err_out_unmap:
10274         if (bp->regview) {
10275                 iounmap(bp->regview);
10276                 bp->regview = NULL;
10277         }
10278         if (bp->doorbells) {
10279                 iounmap(bp->doorbells);
10280                 bp->doorbells = NULL;
10281         }
10282
10283 err_out_release:
10284         if (atomic_read(&pdev->enable_cnt) == 1)
10285                 pci_release_regions(pdev);
10286
10287 err_out_disable:
10288         pci_disable_device(pdev);
10289         pci_set_drvdata(pdev, NULL);
10290
10291 err_out:
10292         return rc;
10293 }
10294
10295 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10296 {
10297         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10298
10299         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10300         return val;
10301 }
10302
10303 /* return value of 1=2.5GHz 2=5GHz */
10304 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10305 {
10306         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10307
10308         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10309         return val;
10310 }
10311
10312 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10313                                     const struct pci_device_id *ent)
10314 {
10315         static int version_printed;
10316         struct net_device *dev = NULL;
10317         struct bnx2x *bp;
10318         int rc;
10319
10320         if (version_printed++ == 0)
10321                 printk(KERN_INFO "%s", version);
10322
10323         /* dev zeroed in alloc_etherdev */
10324         dev = alloc_etherdev(sizeof(*bp));
10325         if (!dev) {
10326                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10327                 return -ENOMEM;
10328         }
10329
10330         bp = netdev_priv(dev);
10331         bp->msglevel = debug;
10332
10333         rc = bnx2x_init_dev(pdev, dev);
10334         if (rc < 0) {
10335                 free_netdev(dev);
10336                 return rc;
10337         }
10338
10339         pci_set_drvdata(pdev, dev);
10340
10341         rc = bnx2x_init_bp(bp);
10342         if (rc)
10343                 goto init_one_exit;
10344
10345         rc = register_netdev(dev);
10346         if (rc) {
10347                 dev_err(&pdev->dev, "Cannot register net device\n");
10348                 goto init_one_exit;
10349         }
10350
10351         bp->common.name = board_info[ent->driver_data].name;
10352         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10353                " IRQ %d, ", dev->name, bp->common.name,
10354                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10355                bnx2x_get_pcie_width(bp),
10356                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10357                dev->base_addr, bp->pdev->irq);
10358         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10359         return 0;
10360
10361 init_one_exit:
10362         if (bp->regview)
10363                 iounmap(bp->regview);
10364
10365         if (bp->doorbells)
10366                 iounmap(bp->doorbells);
10367
10368         free_netdev(dev);
10369
10370         if (atomic_read(&pdev->enable_cnt) == 1)
10371                 pci_release_regions(pdev);
10372
10373         pci_disable_device(pdev);
10374         pci_set_drvdata(pdev, NULL);
10375
10376         return rc;
10377 }
10378
10379 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10380 {
10381         struct net_device *dev = pci_get_drvdata(pdev);
10382         struct bnx2x *bp;
10383
10384         if (!dev) {
10385                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10386                 return;
10387         }
10388         bp = netdev_priv(dev);
10389
10390         unregister_netdev(dev);
10391
10392         if (bp->regview)
10393                 iounmap(bp->regview);
10394
10395         if (bp->doorbells)
10396                 iounmap(bp->doorbells);
10397
10398         free_netdev(dev);
10399
10400         if (atomic_read(&pdev->enable_cnt) == 1)
10401                 pci_release_regions(pdev);
10402
10403         pci_disable_device(pdev);
10404         pci_set_drvdata(pdev, NULL);
10405 }
10406
10407 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10408 {
10409         struct net_device *dev = pci_get_drvdata(pdev);
10410         struct bnx2x *bp;
10411
10412         if (!dev) {
10413                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10414                 return -ENODEV;
10415         }
10416         bp = netdev_priv(dev);
10417
10418         rtnl_lock();
10419
10420         pci_save_state(pdev);
10421
10422         if (!netif_running(dev)) {
10423                 rtnl_unlock();
10424                 return 0;
10425         }
10426
10427         netif_device_detach(dev);
10428
10429         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10430
10431         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10432
10433         rtnl_unlock();
10434
10435         return 0;
10436 }
10437
10438 static int bnx2x_resume(struct pci_dev *pdev)
10439 {
10440         struct net_device *dev = pci_get_drvdata(pdev);
10441         struct bnx2x *bp;
10442         int rc;
10443
10444         if (!dev) {
10445                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10446                 return -ENODEV;
10447         }
10448         bp = netdev_priv(dev);
10449
10450         rtnl_lock();
10451
10452         pci_restore_state(pdev);
10453
10454         if (!netif_running(dev)) {
10455                 rtnl_unlock();
10456                 return 0;
10457         }
10458
10459         bnx2x_set_power_state(bp, PCI_D0);
10460         netif_device_attach(dev);
10461
10462         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10463
10464         rtnl_unlock();
10465
10466         return rc;
10467 }
10468
10469 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10470 {
10471         int i;
10472
10473         bp->state = BNX2X_STATE_ERROR;
10474
10475         bp->rx_mode = BNX2X_RX_MODE_NONE;
10476
10477         bnx2x_netif_stop(bp, 0);
10478
10479         del_timer_sync(&bp->timer);
10480         bp->stats_state = STATS_STATE_DISABLED;
10481         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10482
10483         /* Release IRQs */
10484         bnx2x_free_irq(bp);
10485
10486         if (CHIP_IS_E1(bp)) {
10487                 struct mac_configuration_cmd *config =
10488                                                 bnx2x_sp(bp, mcast_config);
10489
10490                 for (i = 0; i < config->hdr.length_6b; i++)
10491                         CAM_INVALIDATE(config->config_table[i]);
10492         }
10493
10494         /* Free SKBs, SGEs, TPA pool and driver internals */
10495         bnx2x_free_skbs(bp);
10496         for_each_queue(bp, i)
10497                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10498         for_each_queue(bp, i)
10499                 netif_napi_del(&bnx2x_fp(bp, i, napi));
10500         bnx2x_free_mem(bp);
10501
10502         bp->state = BNX2X_STATE_CLOSED;
10503
10504         netif_carrier_off(bp->dev);
10505
10506         return 0;
10507 }
10508
10509 static void bnx2x_eeh_recover(struct bnx2x *bp)
10510 {
10511         u32 val;
10512
10513         mutex_init(&bp->port.phy_mutex);
10514
10515         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10516         bp->link_params.shmem_base = bp->common.shmem_base;
10517         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10518
10519         if (!bp->common.shmem_base ||
10520             (bp->common.shmem_base < 0xA0000) ||
10521             (bp->common.shmem_base >= 0xC0000)) {
10522                 BNX2X_DEV_INFO("MCP not active\n");
10523                 bp->flags |= NO_MCP_FLAG;
10524                 return;
10525         }
10526
10527         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10528         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10529                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10530                 BNX2X_ERR("BAD MCP validity signature\n");
10531
10532         if (!BP_NOMCP(bp)) {
10533                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10534                               & DRV_MSG_SEQ_NUMBER_MASK);
10535                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10536         }
10537 }
10538
10539 /**
10540  * bnx2x_io_error_detected - called when PCI error is detected
10541  * @pdev: Pointer to PCI device
10542  * @state: The current pci connection state
10543  *
10544  * This function is called after a PCI bus error affecting
10545  * this device has been detected.
10546  */
10547 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10548                                                 pci_channel_state_t state)
10549 {
10550         struct net_device *dev = pci_get_drvdata(pdev);
10551         struct bnx2x *bp = netdev_priv(dev);
10552
10553         rtnl_lock();
10554
10555         netif_device_detach(dev);
10556
10557         if (netif_running(dev))
10558                 bnx2x_eeh_nic_unload(bp);
10559
10560         pci_disable_device(pdev);
10561
10562         rtnl_unlock();
10563
10564         /* Request a slot reset */
10565         return PCI_ERS_RESULT_NEED_RESET;
10566 }
10567
10568 /**
10569  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10570  * @pdev: Pointer to PCI device
10571  *
10572  * Restart the card from scratch, as if from a cold-boot.
10573  */
10574 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10575 {
10576         struct net_device *dev = pci_get_drvdata(pdev);
10577         struct bnx2x *bp = netdev_priv(dev);
10578
10579         rtnl_lock();
10580
10581         if (pci_enable_device(pdev)) {
10582                 dev_err(&pdev->dev,
10583                         "Cannot re-enable PCI device after reset\n");
10584                 rtnl_unlock();
10585                 return PCI_ERS_RESULT_DISCONNECT;
10586         }
10587
10588         pci_set_master(pdev);
10589         pci_restore_state(pdev);
10590
10591         if (netif_running(dev))
10592                 bnx2x_set_power_state(bp, PCI_D0);
10593
10594         rtnl_unlock();
10595
10596         return PCI_ERS_RESULT_RECOVERED;
10597 }
10598
10599 /**
10600  * bnx2x_io_resume - called when traffic can start flowing again
10601  * @pdev: Pointer to PCI device
10602  *
10603  * This callback is called when the error recovery driver tells us that
10604  * it's OK to resume normal operation.
10605  */
10606 static void bnx2x_io_resume(struct pci_dev *pdev)
10607 {
10608         struct net_device *dev = pci_get_drvdata(pdev);
10609         struct bnx2x *bp = netdev_priv(dev);
10610
10611         rtnl_lock();
10612
10613         bnx2x_eeh_recover(bp);
10614
10615         if (netif_running(dev))
10616                 bnx2x_nic_load(bp, LOAD_NORMAL);
10617
10618         netif_device_attach(dev);
10619
10620         rtnl_unlock();
10621 }
10622
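/* EEH recovery flow: error_detected (detach + unload, disable the
 * device) -> slot_reset (re-enable, restore config space) -> resume
 * (recover shmem state and reload the NIC).
 */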
10623 static struct pci_error_handlers bnx2x_err_handler = {
10624         .error_detected = bnx2x_io_error_detected,
10625         .slot_reset = bnx2x_io_slot_reset,
10626         .resume = bnx2x_io_resume,
10627 };
10628
10629 static struct pci_driver bnx2x_pci_driver = {
10630         .name        = DRV_MODULE_NAME,
10631         .id_table    = bnx2x_pci_tbl,
10632         .probe       = bnx2x_init_one,
10633         .remove      = __devexit_p(bnx2x_remove_one),
10634         .suspend     = bnx2x_suspend,
10635         .resume      = bnx2x_resume,
10636         .err_handler = &bnx2x_err_handler,
10637 };
10638
10639 static int __init bnx2x_init(void)
10640 {
10641         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10642         if (bnx2x_wq == NULL) {
10643                 printk(KERN_ERR PFX "Cannot create workqueue\n");
10644                 return -ENOMEM;
10645         }
10646
10647         return pci_register_driver(&bnx2x_pci_driver);
10648 }
10649
10650 static void __exit bnx2x_cleanup(void)
10651 {
10652         pci_unregister_driver(&bnx2x_pci_driver);
10653
10654         destroy_workqueue(bnx2x_wq);
10655 }
10656
10657 module_init(bnx2x_init);
10658 module_exit(bnx2x_cleanup);
10659