/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.24"
#define DRV_MODULE_RELDATE      "2009/01/14"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* Indirect register access through the GRC window in PCI config space:
 * the target GRC address is written to PCICFG_GRC_ADDRESS, the data is
 * transferred through PCICFG_GRC_DATA, and the window is restored to
 * PCICFG_VENDOR_ID_OFFSET afterwards.  Used only at init;
 * locking is done by the MCP.
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

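/* DMA len32 dwords from host memory at dma_addr to device (GRC) address
 * dst_addr using the DMAE block.  Completion is detected by polling the
 * wb_comp word in the slowpath area, which the DMAE writes back when done.
 * Before the DMAE is ready, fall back to indirect register writes.
 */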
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

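/* The read counterpart: DMA len32 dwords from device (GRC) address src_addr
 * into the slowpath wb_data buffer.  Falls back to indirect reads while the
 * DMAE is not yet ready.
 */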
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

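/* Scan the assert lists of the four STORM processors (XSTORM, TSTORM,
 * CSTORM and USTORM) in internal memory and print every valid entry.
 * Returns the number of asserts found.
 */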
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

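/* Dump the MCP scratchpad.  The "mark" read from the scratchpad appears to
 * be the firmware's write position in a cyclic print buffer: the dump runs
 * from mark to the end of the buffer and then from the buffer start back
 * up to mark.
 */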
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

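/* Dump driver state for post-mortem debugging: per-queue producer/consumer
 * indices, the rings around the current consumers, the default status block
 * indices, the firmware scratchpad and the STORM assert lists.
 */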
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

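/* Enable host coalescing interrupts.  In MSI-X mode, single-ISR mode is
 * cleared and only the MSI-X and attention bits are enabled; in INT#A mode
 * the HC config is written twice, first with the MSI/MSI-X bit set and
 * then with it cleared.  On E1H chips the leading/trailing edge attention
 * registers are programmed as well.
 */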
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

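/* Acknowledge a status block by writing an igu_ack_register image to the
 * HC command register: reports the new index for the given storm on status
 * block sb_id, along with the interrupt mode (op) and update flags.
 */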
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return ((fp->tx_pkt_prod != tx_cons_sb) ||
                (fp->tx_pkt_prod != fp->tx_pkt_cons));
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

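/* Number of free TX BDs.  The NUM_TX_RINGS "next-page" BDs can never hold
 * packet data, so they are added to the used count and effectively act as
 * a reservation threshold.
 */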
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

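/* Move the SGE producer forward according to the SGEs consumed by this
 * CQE: clear the mask bits of the pages just used, then advance the
 * producer over every fully-cleared 64-bit mask element.
 */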
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

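/* Start a TPA (LRO) aggregation on the given bin: park the skb holding the
 * first part of the aggregated packet (at cons) in the TPA pool, and map
 * the pool's spare skb onto the RX ring at prod in its place.
 */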
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

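/* Attach the SGE pages that complete a TPA aggregation to the skb as page
 * fragments, replenishing each consumed SGE along the way.  Returns 0 on
 * success, or an error if a replacement page cannot be allocated, in which
 * case the caller drops the packet.
 */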
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we're going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

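/* End a TPA aggregation: unmap the skb parked in the bin, fix up its IP
 * checksum, attach the SGE fragments and pass it up the stack, then leave
 * a freshly allocated skb in the bin.  If allocation fails, the packet is
 * dropped and the old buffer stays in the bin.
 */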
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

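/* RX fast-path poll loop: walk the completion queue toward the hardware
 * consumer (bounded by the NAPI budget), dispatching slowpath CQEs to
 * bnx2x_sp_event() and handling regular and TPA packets inline.
 */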
1402 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1403 {
1404         struct bnx2x *bp = fp->bp;
1405         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1406         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1407         int rx_pkt = 0;
1408
1409 #ifdef BNX2X_STOP_ON_ERROR
1410         if (unlikely(bp->panic))
1411                 return 0;
1412 #endif
1413
1414         /* CQ "next element" is of the size of the regular element,
1415            that's why it's ok here */
1416         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1417         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1418                 hw_comp_cons++;
1419
1420         bd_cons = fp->rx_bd_cons;
1421         bd_prod = fp->rx_bd_prod;
1422         bd_prod_fw = bd_prod;
1423         sw_comp_cons = fp->rx_comp_cons;
1424         sw_comp_prod = fp->rx_comp_prod;
1425
1426         /* Memory barrier necessary as speculative reads of the rx
1427          * buffer can be ahead of the index in the status block
1428          */
1429         rmb();
1430
1431         DP(NETIF_MSG_RX_STATUS,
1432            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1433            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1434
1435         while (sw_comp_cons != hw_comp_cons) {
1436                 struct sw_rx_bd *rx_buf = NULL;
1437                 struct sk_buff *skb;
1438                 union eth_rx_cqe *cqe;
1439                 u8 cqe_fp_flags;
1440                 u16 len, pad;
1441
1442                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1443                 bd_prod = RX_BD(bd_prod);
1444                 bd_cons = RX_BD(bd_cons);
1445
1446                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1447                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1448
1449                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1450                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1451                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1452                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1453                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1454                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1455
1456                 /* is this a slowpath msg? */
1457                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1458                         bnx2x_sp_event(fp, cqe);
1459                         goto next_cqe;
1460
1461                 /* this is an rx packet */
1462                 } else {
1463                         rx_buf = &fp->rx_buf_ring[bd_cons];
1464                         skb = rx_buf->skb;
1465                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1466                         pad = cqe->fast_path_cqe.placement_offset;
1467
1468                         /* If CQE is marked both TPA_START and TPA_END
1469                            it is a non-TPA CQE */
1470                         if ((!fp->disable_tpa) &&
1471                             (TPA_TYPE(cqe_fp_flags) !=
1472                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1473                                 u16 queue = cqe->fast_path_cqe.queue_index;
1474
1475                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1476                                         DP(NETIF_MSG_RX_STATUS,
1477                                            "calling tpa_start on queue %d\n",
1478                                            queue);
1479
1480                                         bnx2x_tpa_start(fp, queue, skb,
1481                                                         bd_cons, bd_prod);
1482                                         goto next_rx;
1483                                 }
1484
1485                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1486                                         DP(NETIF_MSG_RX_STATUS,
1487                                            "calling tpa_stop on queue %d\n",
1488                                            queue);
1489
1490                                         if (!BNX2X_RX_SUM_FIX(cqe))
1491                                                 BNX2X_ERR("STOP on non-TCP "
1492                                                           "data\n");
1493
1494                                         /* This is the size of the linear data
1495                                            on this skb */
1496                                         len = le16_to_cpu(cqe->fast_path_cqe.
1497                                                                 len_on_bd);
1498                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1499                                                     len, cqe, comp_ring_cons);
1500 #ifdef BNX2X_STOP_ON_ERROR
1501                                         if (bp->panic)
1502                                                 return -EINVAL;
1503 #endif
1504
1505                                         bnx2x_update_sge_prod(fp,
1506                                                         &cqe->fast_path_cqe);
1507                                         goto next_cqe;
1508                                 }
1509                         }
1510
1511                         pci_dma_sync_single_for_device(bp->pdev,
1512                                         pci_unmap_addr(rx_buf, mapping),
1513                                                        pad + RX_COPY_THRESH,
1514                                                        PCI_DMA_FROMDEVICE);
1515                         prefetch(skb);
1516                         prefetch(((char *)(skb)) + 128);
1517
1518                         /* is this an error packet? */
1519                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1520                                 DP(NETIF_MSG_RX_ERR,
1521                                    "ERROR  flags %x  rx packet %u\n",
1522                                    cqe_fp_flags, sw_comp_cons);
1523                                 bp->eth_stats.rx_err_discard_pkt++;
1524                                 goto reuse_rx;
1525                         }
1526
1527                         /* Since we don't have a jumbo ring,
1528                          * copy small packets if mtu > 1500
1529                          */
1530                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1531                             (len <= RX_COPY_THRESH)) {
1532                                 struct sk_buff *new_skb;
1533
1534                                 new_skb = netdev_alloc_skb(bp->dev,
1535                                                            len + pad);
1536                                 if (new_skb == NULL) {
1537                                         DP(NETIF_MSG_RX_ERR,
1538                                            "ERROR  packet dropped "
1539                                            "because of alloc failure\n");
1540                                         bp->eth_stats.rx_skb_alloc_failed++;
1541                                         goto reuse_rx;
1542                                 }
1543
1544                                 /* aligned copy */
1545                                 skb_copy_from_linear_data_offset(skb, pad,
1546                                                     new_skb->data + pad, len);
1547                                 skb_reserve(new_skb, pad);
1548                                 skb_put(new_skb, len);
1549
1550                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1551
1552                                 skb = new_skb;
1553
1554                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1555                                 pci_unmap_single(bp->pdev,
1556                                         pci_unmap_addr(rx_buf, mapping),
1557                                                  bp->rx_buf_size,
1558                                                  PCI_DMA_FROMDEVICE);
1559                                 skb_reserve(skb, pad);
1560                                 skb_put(skb, len);
1561
1562                         } else {
1563                                 DP(NETIF_MSG_RX_ERR,
1564                                    "ERROR  packet dropped because "
1565                                    "of alloc failure\n");
1566                                 bp->eth_stats.rx_skb_alloc_failed++;
1567 reuse_rx:
1568                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1569                                 goto next_rx;
1570                         }
1571
1572                         skb->protocol = eth_type_trans(skb, bp->dev);
1573
1574                         skb->ip_summed = CHECKSUM_NONE;
1575                         if (bp->rx_csum) {
1576                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1577                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1578                                 else
1579                                         bp->eth_stats.hw_csum_err++;
1580                         }
1581                 }
1582
1583 #ifdef BCM_VLAN
1584                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1585                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1586                      PARSING_FLAGS_VLAN))
1587                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1588                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1589                 else
1590 #endif
1591                         netif_receive_skb(skb);
1592
1593
1594 next_rx:
1595                 rx_buf->skb = NULL;
1596
1597                 bd_cons = NEXT_RX_IDX(bd_cons);
1598                 bd_prod = NEXT_RX_IDX(bd_prod);
1599                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1600                 rx_pkt++;
1601 next_cqe:
1602                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1603                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1604
1605                 if (rx_pkt == budget)
1606                         break;
1607         } /* while */
1608
1609         fp->rx_bd_cons = bd_cons;
1610         fp->rx_bd_prod = bd_prod_fw;
1611         fp->rx_comp_cons = sw_comp_cons;
1612         fp->rx_comp_prod = sw_comp_prod;
1613
1614         /* Update producers */
1615         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1616                              fp->rx_sge_prod);
1617
1618         fp->rx_pkt += rx_pkt;
1619         fp->rx_calls++;
1620
1621         return rx_pkt;
1622 }
1623
1624 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1625 {
1626         struct bnx2x_fastpath *fp = fp_cookie;
1627         struct bnx2x *bp = fp->bp;
1628         int index = FP_IDX(fp);
1629
1630         /* Return here if interrupt is disabled */
1631         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1632                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1633                 return IRQ_HANDLED;
1634         }
1635
1636         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1637            index, FP_SB_ID(fp));
1638         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1639
1640 #ifdef BNX2X_STOP_ON_ERROR
1641         if (unlikely(bp->panic))
1642                 return IRQ_HANDLED;
1643 #endif
1644
1645         prefetch(fp->rx_cons_sb);
1646         prefetch(fp->tx_cons_sb);
1647         prefetch(&fp->status_blk->c_status_block.status_block_index);
1648         prefetch(&fp->status_blk->u_status_block.status_block_index);
1649
1650         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1651
1652         return IRQ_HANDLED;
1653 }
1654
1655 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1656 {
1657         struct net_device *dev = dev_instance;
1658         struct bnx2x *bp = netdev_priv(dev);
1659         u16 status = bnx2x_ack_int(bp);
1660         u16 mask;
1661
1662         /* Return here if interrupt is shared and it's not for us */
1663         if (unlikely(status == 0)) {
1664                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1665                 return IRQ_NONE;
1666         }
1667         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1668
1669         /* Return here if interrupt is disabled */
1670         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1671                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1672                 return IRQ_HANDLED;
1673         }
1674
1675 #ifdef BNX2X_STOP_ON_ERROR
1676         if (unlikely(bp->panic))
1677                 return IRQ_HANDLED;
1678 #endif
1679
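        /* In the status word read above, bit 0 signals the default
           (slowpath) status block, handled further down, while bit
           (sb_id + 1) signals fastpath status block sb_id - hence the
           0x2 << sb_id mask below */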
1680         mask = 0x2 << bp->fp[0].sb_id;
1681         if (status & mask) {
1682                 struct bnx2x_fastpath *fp = &bp->fp[0];
1683
1684                 prefetch(fp->rx_cons_sb);
1685                 prefetch(fp->tx_cons_sb);
1686                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1687                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1688
1689                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1690
1691                 status &= ~mask;
1692         }
1693
1694
1695         if (unlikely(status & 0x1)) {
1696                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1697
1698                 status &= ~0x1;
1699                 if (!status)
1700                         return IRQ_HANDLED;
1701         }
1702
1703         if (status)
1704                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1705                    status);
1706
1707         return IRQ_HANDLED;
1708 }
1709
1710 /* end of fast path */
1711
1712 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1713
1714 /* Link */
1715
1716 /*
1717  * General service functions
1718  */
1719
1720 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1721 {
1722         u32 lock_status;
1723         u32 resource_bit = (1 << resource);
1724         int func = BP_FUNC(bp);
1725         u32 hw_lock_control_reg;
1726         int cnt;
1727
1728         /* Validating that the resource is within range */
1729         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1730                 DP(NETIF_MSG_HW,
1731                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1732                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1733                 return -EINVAL;
1734         }
1735
1736         if (func <= 5) {
1737                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1738         } else {
1739                 hw_lock_control_reg =
1740                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1741         }
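        /* Layout note (derived from the register arithmetic above and the
           acquire/release paths below): functions 0..5 get control slots at
           MISC_REG_DRIVER_CONTROL_1 + 0, 8, ..., 40 and functions 6..7 at
           MISC_REG_DRIVER_CONTROL_7 + 0, 8; reading the slot returns the
           lock status while writing slot + 4 attempts to take a lock bit */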
1742
1743         /* Validating that the resource is not already taken */
1744         lock_status = REG_RD(bp, hw_lock_control_reg);
1745         if (lock_status & resource_bit) {
1746                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1747                    lock_status, resource_bit);
1748                 return -EEXIST;
1749         }
1750
1751         /* Try for 5 seconds, retrying every 5ms */
1752         for (cnt = 0; cnt < 1000; cnt++) {
1753                 /* Try to acquire the lock */
1754                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1755                 lock_status = REG_RD(bp, hw_lock_control_reg);
1756                 if (lock_status & resource_bit)
1757                         return 0;
1758
1759                 msleep(5);
1760         }
1761         DP(NETIF_MSG_HW, "Timeout\n");
1762         return -EAGAIN;
1763 }
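/* Typical acquire/release pairing, sketched from the callers below
 * (bnx2x_set_gpio(), bnx2x_set_spio() and the PHY lock helpers):
 *
 *      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      ... touch the shared resource ...
 *      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * A non-zero return from the acquire means the lock was not taken.
 */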
1764
1765 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1766 {
1767         u32 lock_status;
1768         u32 resource_bit = (1 << resource);
1769         int func = BP_FUNC(bp);
1770         u32 hw_lock_control_reg;
1771
1772         /* Validating that the resource is within range */
1773         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1774                 DP(NETIF_MSG_HW,
1775                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1776                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1777                 return -EINVAL;
1778         }
1779
1780         if (func <= 5) {
1781                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1782         } else {
1783                 hw_lock_control_reg =
1784                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1785         }
1786
1787         /* Validating that the resource is currently taken */
1788         lock_status = REG_RD(bp, hw_lock_control_reg);
1789         if (!(lock_status & resource_bit)) {
1790                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1791                    lock_status, resource_bit);
1792                 return -EFAULT;
1793         }
1794
1795         REG_WR(bp, hw_lock_control_reg, resource_bit);
1796         return 0;
1797 }
1798
1799 /* HW Lock for shared dual port PHYs */
1800 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1801 {
1802         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1803
1804         mutex_lock(&bp->port.phy_mutex);
1805
1806         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1807             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1808                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1809 }
1810
1811 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1812 {
1813         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1814
1815         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1816             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1817                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1818
1819         mutex_unlock(&bp->port.phy_mutex);
1820 }
1821
1822 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1823 {
1824         /* The GPIO should be swapped if swap register is set and active */
1825         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1826                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1827         int gpio_shift = gpio_num +
1828                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1829         u32 gpio_mask = (1 << gpio_shift);
1830         u32 gpio_reg;
1831
1832         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1833                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1834                 return -EINVAL;
1835         }
1836
1837         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1838         /* read GPIO and mask out all but the float bits */
1839         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1840
1841         switch (mode) {
1842         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1843                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1844                    gpio_num, gpio_shift);
1845                 /* clear FLOAT and set CLR */
1846                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1847                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1848                 break;
1849
1850         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1851                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1852                    gpio_num, gpio_shift);
1853                 /* clear FLOAT and set SET */
1854                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1856                 break;
1857
1858         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1859                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1860                    gpio_num, gpio_shift);
1861                 /* set FLOAT */
1862                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1863                 break;
1864
1865         default:
1866                 break;
1867         }
1868
1869         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1870         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1871
1872         return 0;
1873 }
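/* Usage sketch, matching the fan-failure handler later in this file:
 *
 *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *                     MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 *
 * drives GPIO 1 low on the given port (there, to hold the PHY in reset).
 */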
1874
1875 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1876 {
1877         u32 spio_mask = (1 << spio_num);
1878         u32 spio_reg;
1879
1880         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1881             (spio_num > MISC_REGISTERS_SPIO_7)) {
1882                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1883                 return -EINVAL;
1884         }
1885
1886         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1887         /* read SPIO and mask out all but the float bits */
1888         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1889
1890         switch (mode) {
1891         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1892                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1893                 /* clear FLOAT and set CLR */
1894                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1895                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1896                 break;
1897
1898         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1899                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1900                 /* clear FLOAT and set SET */
1901                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1902                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1903                 break;
1904
1905         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1906                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1907                 /* set FLOAT */
1908                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1909                 break;
1910
1911         default:
1912                 break;
1913         }
1914
1915         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1916         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1917
1918         return 0;
1919 }
1920
1921 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1922 {
1923         switch (bp->link_vars.ieee_fc &
1924                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1925         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1926                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1927                                           ADVERTISED_Pause);
1928                 break;
1929         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1930                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1931                                          ADVERTISED_Pause);
1932                 break;
1933         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1934                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1935                 break;
1936         default:
1937                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1938                                           ADVERTISED_Pause);
1939                 break;
1940         }
1941 }
1942
1943 static void bnx2x_link_report(struct bnx2x *bp)
1944 {
1945         if (bp->link_vars.link_up) {
1946                 if (bp->state == BNX2X_STATE_OPEN)
1947                         netif_carrier_on(bp->dev);
1948                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1949
1950                 printk("%d Mbps ", bp->link_vars.line_speed);
1951
1952                 if (bp->link_vars.duplex == DUPLEX_FULL)
1953                         printk("full duplex");
1954                 else
1955                         printk("half duplex");
1956
1957                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1958                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1959                                 printk(", receive ");
1960                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1961                                         printk("& transmit ");
1962                         } else {
1963                                 printk(", transmit ");
1964                         }
1965                         printk("flow control ON");
1966                 }
1967                 printk("\n");
1968
1969         } else { /* link_down */
1970                 netif_carrier_off(bp->dev);
1971                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1972         }
1973 }
1974
1975 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1976 {
1977         if (!BP_NOMCP(bp)) {
1978                 u8 rc;
1979
1980                 /* Initialize link parameters structure variables */
1981                 /* It is recommended to turn off RX FC for jumbo frames
1982                    for better performance */
1983                 if (IS_E1HMF(bp))
1984                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1985                 else if (bp->dev->mtu > 5000)
1986                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1987                 else
1988                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1989
1990                 bnx2x_acquire_phy_lock(bp);
1991                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1992                 bnx2x_release_phy_lock(bp);
1993
1994                 bnx2x_calc_fc_adv(bp);
1995
1996                 if (bp->link_vars.link_up)
1997                         bnx2x_link_report(bp);
1998
1999
2000                 return rc;
2001         }
2002         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2003         return -EINVAL;
2004 }
2005
2006 static void bnx2x_link_set(struct bnx2x *bp)
2007 {
2008         if (!BP_NOMCP(bp)) {
2009                 bnx2x_acquire_phy_lock(bp);
2010                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2011                 bnx2x_release_phy_lock(bp);
2012
2013                 bnx2x_calc_fc_adv(bp);
2014         } else
2015                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2016 }
2017
2018 static void bnx2x__link_reset(struct bnx2x *bp)
2019 {
2020         if (!BP_NOMCP(bp)) {
2021                 bnx2x_acquire_phy_lock(bp);
2022                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2023                 bnx2x_release_phy_lock(bp);
2024         } else
2025                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2026 }
2027
2028 static u8 bnx2x_link_test(struct bnx2x *bp)
2029 {
2030         u8 rc;
2031
2032         bnx2x_acquire_phy_lock(bp);
2033         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2034         bnx2x_release_phy_lock(bp);
2035
2036         return rc;
2037 }
2038
2039 /* Calculates the sum of vn_min_rates.
2040    It's needed for further normalizing of the min_rates.
2041
2042    Returns:
2043      sum of vn_min_rates
2044        or
2045      0 - if all the min_rates are 0.
2046      In the latter case the fairness algorithm should be deactivated.
2047      If not all min_rates are zero then those that are zero will
2048      be set to 1.
2049  */
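/* Worked example (illustrative, assuming no hidden functions): with four
 * vns at configured min rates {0, 2500, 0, 7500}, the two zero entries
 * are counted as DEF_MIN_RATE each and the returned weight sum is
 * 2500 + 7500 + 2 * DEF_MIN_RATE; only if all four rates are zero does
 * the function return 0, deactivating fairness.
 */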
2050 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2051 {
2052         int i, port = BP_PORT(bp);
2053         u32 wsum = 0;
2054         int all_zero = 1;
2055
2056         for (i = 0; i < E1HVN_MAX; i++) {
2057                 u32 vn_cfg =
2058                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2059                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2060                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2061                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2062                         /* If min rate is zero - set it to 1 */
2063                         if (!vn_min_rate)
2064                                 vn_min_rate = DEF_MIN_RATE;
2065                         else
2066                                 all_zero = 0;
2067
2068                         wsum += vn_min_rate;
2069                 }
2070         }
2071
2072         /* ... only if all min rates are zeros - disable FAIRNESS */
2073         if (all_zero)
2074                 return 0;
2075
2076         return wsum;
2077 }
2078
2079 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2080                                    int en_fness,
2081                                    u16 port_rate,
2082                                    struct cmng_struct_per_port *m_cmng_port)
2083 {
2084         u32 r_param = port_rate / 8;
2085         int port = BP_PORT(bp);
2086         int i;
2087
2088         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2089
2090         /* Enable minmax only if we are in e1hmf mode */
2091         if (IS_E1HMF(bp)) {
2092                 u32 fair_periodic_timeout_usec;
2093                 u32 t_fair;
2094
2095                 /* Enable rate shaping and fairness */
2096                 m_cmng_port->flags.cmng_vn_enable = 1;
2097                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2098                 m_cmng_port->flags.rate_shaping_enable = 1;
2099
2100                 if (!en_fness)
2101                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2102                            "  fairness will be disabled\n");
2103
2104                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2105                 m_cmng_port->rs_vars.rs_periodic_timeout =
2106                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2107
2108                 /* this is the threshold below which no timer arming will occur.
2109                    The 1.25 coefficient makes the threshold a little bigger
2110                    than the real time, to compensate for timer inaccuracy */
2111                 m_cmng_port->rs_vars.rs_threshold =
2112                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2113
2114                 /* resolution of fairness timer */
2115                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2116                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2117                 t_fair = T_FAIR_COEF / port_rate;
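                /* worked numbers: the comment above implies
                   T_FAIR_COEF = 10^7 (usec * Mbps), so port_rate = 10000
                   yields t_fair = 1000 usec and port_rate = 1000 yields
                   t_fair = 10000 usec */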
2118
2119                 /* this is the threshold below which we won't arm
2120                    the timer anymore */
2121                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2122
2123                 /* we multiply by 1e3/8 to get bytes/msec.
2124                    We don't want the credits to exceed
2125                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2126                 m_cmng_port->fair_vars.upper_bound =
2127                                                 r_param * t_fair * FAIR_MEM;
2128                 /* since each tick is 4 usec */
2129                 m_cmng_port->fair_vars.fairness_timeout =
2130                                                 fair_periodic_timeout_usec / 4;
2131
2132         } else {
2133                 /* Disable rate shaping and fairness */
2134                 m_cmng_port->flags.cmng_vn_enable = 0;
2135                 m_cmng_port->flags.fairness_enable = 0;
2136                 m_cmng_port->flags.rate_shaping_enable = 0;
2137
2138                 DP(NETIF_MSG_IFUP,
2139                    "Single function mode  minmax will be disabled\n");
2140         }
2141
2142         /* Store it to internal memory */
2143         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2144                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2145                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2146                        ((u32 *)(m_cmng_port))[i]);
2147 }
2148
2149 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2150                                    u32 wsum, u16 port_rate,
2151                                  struct cmng_struct_per_port *m_cmng_port)
2152 {
2153         struct rate_shaping_vars_per_vn m_rs_vn;
2154         struct fairness_vars_per_vn m_fair_vn;
2155         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2156         u16 vn_min_rate, vn_max_rate;
2157         int i;
2158
2159         /* If function is hidden - set min and max to zeroes */
2160         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2161                 vn_min_rate = 0;
2162                 vn_max_rate = 0;
2163
2164         } else {
2165                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2166                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2167                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2168                    if current min rate is zero - set it to 1.
2169                    This is a requirement of the algorithm. */
2170                 if ((vn_min_rate == 0) && wsum)
2171                         vn_min_rate = DEF_MIN_RATE;
2172                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2173                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2174         }
2175
2176         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2177            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2178
2179         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2180         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2181
2182         /* global vn counter - maximal Mbps for this vn */
2183         m_rs_vn.vn_counter.rate = vn_max_rate;
2184
2185         /* quota - number of bytes transmitted in this period */
2186         m_rs_vn.vn_counter.quota =
2187                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2188
2189 #ifdef BNX2X_PER_PROT_QOS
2190         /* per protocol counter */
2191         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2192                 /* maximal Mbps for this protocol */
2193                 m_rs_vn.protocol_counters[protocol].rate =
2194                                                 protocol_max_rate[protocol];
2195                 /* the quota in each timer period -
2196                    number of bytes transmitted in this period */
2197                 m_rs_vn.protocol_counters[protocol].quota =
2198                         (u32)(rs_periodic_timeout_usec *
2199                           ((double)m_rs_vn.
2200                                    protocol_counters[protocol].rate/8));
2201         }
2202 #endif
2203
2204         if (wsum) {
2205                 /* credit for each period of the fairness algorithm:
2206                    number of bytes in T_FAIR (the vns share the port rate).
2207                    wsum should not be larger than 10000, thus
2208                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2209                 m_fair_vn.vn_credit_delta =
2210                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2211                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2212                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2213                    m_fair_vn.vn_credit_delta);
2214         }
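        /* illustrative numbers: even at the documented bound wsum = 10000,
           a vn with vn_min_rate = DEF_MIN_RATE still gets a positive
           credit of DEF_MIN_RATE * T_FAIR_COEF / 80000, and the max()
           with 2 * fair_threshold keeps the credit from falling below
           the algorithm resolution */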
2215
2216 #ifdef BNX2X_PER_PROT_QOS
2217         do {
2218                 u32 protocolWeightSum = 0;
2219
2220                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2221                         protocolWeightSum +=
2222                                         drvInit.protocol_min_rate[protocol];
2223                 /* per protocol counter -
2224                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2225                 if (protocolWeightSum > 0) {
2226                         for (protocol = 0;
2227                              protocol < NUM_OF_PROTOCOLS; protocol++)
2228                                 /* credit for each period of the
2229                                    fairness algorithm - number of bytes in
2230                                    T_FAIR (the protocol share the vn rate) */
2231                                 m_fair_vn.protocol_credit_delta[protocol] =
2232                                         (u32)((vn_min_rate / 8) * t_fair *
2233                                         protocol_min_rate / protocolWeightSum);
2234                 }
2235         } while (0);
2236 #endif
2237
2238         /* Store it to internal memory */
2239         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2240                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2241                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2242                        ((u32 *)(&m_rs_vn))[i]);
2243
2244         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2245                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2246                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2247                        ((u32 *)(&m_fair_vn))[i]);
2248 }
2249
2250 /* This function is called upon link interrupt */
2251 static void bnx2x_link_attn(struct bnx2x *bp)
2252 {
2253         int vn;
2254
2255         /* Make sure that we are synced with the current statistics */
2256         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2257
2258         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2259
2260         if (bp->link_vars.link_up) {
2261
2262                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2263                         struct host_port_stats *pstats;
2264
2265                         pstats = bnx2x_sp(bp, port_stats);
2266                         /* reset old bmac stats */
2267                         memset(&(pstats->mac_stx[0]), 0,
2268                                sizeof(struct mac_stx));
2269                 }
2270                 if ((bp->state == BNX2X_STATE_OPEN) ||
2271                     (bp->state == BNX2X_STATE_DISABLED))
2272                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2273         }
2274
2275         /* indicate link status */
2276         bnx2x_link_report(bp);
2277
2278         if (IS_E1HMF(bp)) {
2279                 int func;
2280
2281                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2282                         if (vn == BP_E1HVN(bp))
2283                                 continue;
2284
2285                         func = ((vn << 1) | BP_PORT(bp));
2286
2287                         /* Set the attention towards other drivers
2288                            on the same port */
2289                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2290                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2291                 }
2292         }
2293
2294         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2295                 struct cmng_struct_per_port m_cmng_port;
2296                 u32 wsum;
2297                 int port = BP_PORT(bp);
2298
2299                 /* Init RATE SHAPING and FAIRNESS contexts */
2300                 wsum = bnx2x_calc_vn_wsum(bp);
2301                 bnx2x_init_port_minmax(bp, (int)wsum,
2302                                         bp->link_vars.line_speed,
2303                                         &m_cmng_port);
2304                 if (IS_E1HMF(bp))
2305                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2306                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2307                                         wsum, bp->link_vars.line_speed,
2308                                                      &m_cmng_port);
2309         }
2310 }
2311
2312 static void bnx2x__link_status_update(struct bnx2x *bp)
2313 {
2314         if (bp->state != BNX2X_STATE_OPEN)
2315                 return;
2316
2317         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2318
2319         if (bp->link_vars.link_up)
2320                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2321         else
2322                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2323
2324         /* indicate link status */
2325         bnx2x_link_report(bp);
2326 }
2327
2328 static void bnx2x_pmf_update(struct bnx2x *bp)
2329 {
2330         int port = BP_PORT(bp);
2331         u32 val;
2332
2333         bp->port.pmf = 1;
2334         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2335
2336         /* enable nig attention */
2337         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2338         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2339         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2340
2341         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2342 }
2343
2344 /* end of Link */
2345
2346 /* slow path */
2347
2348 /*
2349  * General service functions
2350  */
2351
2352 /* the slow path queue is odd since completions arrive on the fastpath ring */
2353 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2354                          u32 data_hi, u32 data_lo, int common)
2355 {
2356         int func = BP_FUNC(bp);
2357
2358         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2359            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2360            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2361            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2362            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2363
2364 #ifdef BNX2X_STOP_ON_ERROR
2365         if (unlikely(bp->panic))
2366                 return -EIO;
2367 #endif
2368
2369         spin_lock_bh(&bp->spq_lock);
2370
2371         if (!bp->spq_left) {
2372                 BNX2X_ERR("BUG! SPQ ring full!\n");
2373                 spin_unlock_bh(&bp->spq_lock);
2374                 bnx2x_panic();
2375                 return -EBUSY;
2376         }
2377
2378         /* The CID needs the port number encoded in it */
2379         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2380                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2381                                      HW_CID(bp, cid)));
2382         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2383         if (common)
2384                 bp->spq_prod_bd->hdr.type |=
2385                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2386
2387         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2388         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2389
2390         bp->spq_left--;
2391
2392         if (bp->spq_prod_bd == bp->spq_last_bd) {
2393                 bp->spq_prod_bd = bp->spq;
2394                 bp->spq_prod_idx = 0;
2395                 DP(NETIF_MSG_TIMER, "end of spq\n");
2396
2397         } else {
2398                 bp->spq_prod_bd++;
2399                 bp->spq_prod_idx++;
2400         }
2401
2402         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2403                bp->spq_prod_idx);
2404
2405         spin_unlock_bh(&bp->spq_lock);
2406         return 0;
2407 }
2408
2409 /* acquire split MCP access lock register */
2410 static int bnx2x_acquire_alr(struct bnx2x *bp)
2411 {
2412         u32 i, j, val;
2413         int rc = 0;
2414
2415         might_sleep();
2416         i = 100;
2417         for (j = 0; j < i*10; j++) {
2418                 val = (1UL << 31);
2419                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2420                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2421                 if (val & (1L << 31))
2422                         break;
2423
2424                 msleep(5);
2425         }
2426         if (!(val & (1L << 31))) {
2427                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2428                 rc = -EBUSY;
2429         }
2430
2431         return rc;
2432 }
2433
2434 /* release split MCP access lock register */
2435 static void bnx2x_release_alr(struct bnx2x *bp)
2436 {
2437         u32 val = 0;
2438
2439         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2440 }
2441
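/* Returns a bitmask of the def status block indices that changed since
 * the last call: bit 0 - attention, bit 1 - cstorm, bit 2 - ustorm,
 * bit 3 - xstorm, bit 4 - tstorm (the encoding consumed by the caller).
 */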
2442 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2443 {
2444         struct host_def_status_block *def_sb = bp->def_status_blk;
2445         u16 rc = 0;
2446
2447         barrier(); /* status block is written to by the chip */
2448         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2449                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2450                 rc |= 1;
2451         }
2452         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2453                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2454                 rc |= 2;
2455         }
2456         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2457                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2458                 rc |= 4;
2459         }
2460         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2461                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2462                 rc |= 8;
2463         }
2464         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2465                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2466                 rc |= 16;
2467         }
2468         return rc;
2469 }
2470
2471 /*
2472  * slow path service functions
2473  */
2474
2475 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2476 {
2477         int port = BP_PORT(bp);
2478         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2479                        COMMAND_REG_ATTN_BITS_SET);
2480         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2481                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2482         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2483                                        NIG_REG_MASK_INTERRUPT_PORT0;
2484         u32 aeu_mask;
2485
2486         if (bp->attn_state & asserted)
2487                 BNX2X_ERR("IGU ERROR\n");
2488
2489         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2490         aeu_mask = REG_RD(bp, aeu_addr);
2491
2492         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2493            aeu_mask, asserted);
2494         aeu_mask &= ~(asserted & 0xff);
2495         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2496
2497         REG_WR(bp, aeu_addr, aeu_mask);
2498         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2499
2500         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2501         bp->attn_state |= asserted;
2502         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2503
2504         if (asserted & ATTN_HARD_WIRED_MASK) {
2505                 if (asserted & ATTN_NIG_FOR_FUNC) {
2506
2507                         bnx2x_acquire_phy_lock(bp);
2508
2509                         /* save nig interrupt mask */
2510                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2511                         REG_WR(bp, nig_int_mask_addr, 0);
2512
2513                         bnx2x_link_attn(bp);
2514
2515                         /* handle unicore attn? */
2516                 }
2517                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2518                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2519
2520                 if (asserted & GPIO_2_FUNC)
2521                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2522
2523                 if (asserted & GPIO_3_FUNC)
2524                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2525
2526                 if (asserted & GPIO_4_FUNC)
2527                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2528
2529                 if (port == 0) {
2530                         if (asserted & ATTN_GENERAL_ATTN_1) {
2531                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2532                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2533                         }
2534                         if (asserted & ATTN_GENERAL_ATTN_2) {
2535                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2536                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2537                         }
2538                         if (asserted & ATTN_GENERAL_ATTN_3) {
2539                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2540                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2541                         }
2542                 } else {
2543                         if (asserted & ATTN_GENERAL_ATTN_4) {
2544                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2545                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2546                         }
2547                         if (asserted & ATTN_GENERAL_ATTN_5) {
2548                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2549                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2550                         }
2551                         if (asserted & ATTN_GENERAL_ATTN_6) {
2552                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2553                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2554                         }
2555                 }
2556
2557         } /* if hardwired */
2558
2559         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2560            asserted, hc_addr);
2561         REG_WR(bp, hc_addr, asserted);
2562
2563         /* now set back the mask */
2564         if (asserted & ATTN_NIG_FOR_FUNC) {
2565                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2566                 bnx2x_release_phy_lock(bp);
2567         }
2568 }
2569
2570 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2571 {
2572         int port = BP_PORT(bp);
2573         int reg_offset;
2574         u32 val;
2575
2576         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2577                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2578
2579         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2580
2581                 val = REG_RD(bp, reg_offset);
2582                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2583                 REG_WR(bp, reg_offset, val);
2584
2585                 BNX2X_ERR("SPIO5 hw attention\n");
2586
2587                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2588                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2589                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2590                         /* Fan failure attention */
2591
2592                         /* The PHY reset is controlled by GPIO 1 */
2593                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2594                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2595                         /* Low power mode is controlled by GPIO 2 */
2596                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2597                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2598                         /* mark the failure */
2599                         bp->link_params.ext_phy_config &=
2600                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2601                         bp->link_params.ext_phy_config |=
2602                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2603                         SHMEM_WR(bp,
2604                                  dev_info.port_hw_config[port].
2605                                                         external_phy_config,
2606                                  bp->link_params.ext_phy_config);
2607                         /* log the failure */
2608                         printk(KERN_ERR PFX "Fan Failure on Network"
2609                                " Controller %s has caused the driver to"
2610                                " shutdown the card to prevent permanent"
2611                                " damage.  Please contact Dell Support for"
2612                                " assistance\n", bp->dev->name);
2613                         break;
2614
2615                 default:
2616                         break;
2617                 }
2618         }
2619
2620         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2621
2622                 val = REG_RD(bp, reg_offset);
2623                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2624                 REG_WR(bp, reg_offset, val);
2625
2626                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2627                           (attn & HW_INTERRUT_ASSERT_SET_0));
2628                 bnx2x_panic();
2629         }
2630 }
2631
2632 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2633 {
2634         u32 val;
2635
2636         if (attn & BNX2X_DOORQ_ASSERT) {
2637
2638                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2639                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2640                 /* DORQ discard attention */
2641                 if (val & 0x2)
2642                         BNX2X_ERR("FATAL error from DORQ\n");
2643         }
2644
2645         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2646
2647                 int port = BP_PORT(bp);
2648                 int reg_offset;
2649
2650                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2651                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2652
2653                 val = REG_RD(bp, reg_offset);
2654                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2655                 REG_WR(bp, reg_offset, val);
2656
2657                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2658                           (attn & HW_INTERRUT_ASSERT_SET_1));
2659                 bnx2x_panic();
2660         }
2661 }
2662
2663 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2664 {
2665         u32 val;
2666
2667         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2668
2669                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2670                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2671                 /* CFC error attention */
2672                 if (val & 0x2)
2673                         BNX2X_ERR("FATAL error from CFC\n");
2674         }
2675
2676         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2677
2678                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2679                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2680                 /* RQ_USDMDP_FIFO_OVERFLOW */
2681                 if (val & 0x18000)
2682                         BNX2X_ERR("FATAL error from PXP\n");
2683         }
2684
2685         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2686
2687                 int port = BP_PORT(bp);
2688                 int reg_offset;
2689
2690                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2691                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2692
2693                 val = REG_RD(bp, reg_offset);
2694                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2695                 REG_WR(bp, reg_offset, val);
2696
2697                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2698                           (attn & HW_INTERRUT_ASSERT_SET_2));
2699                 bnx2x_panic();
2700         }
2701 }
2702
2703 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2704 {
2705         u32 val;
2706
2707         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2708
2709                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2710                         int func = BP_FUNC(bp);
2711
2712                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2713                         bnx2x__link_status_update(bp);
2714                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2715                                                         DRV_STATUS_PMF)
2716                                 bnx2x_pmf_update(bp);
2717
2718                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2719
2720                         BNX2X_ERR("MC assert!\n");
2721                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2722                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2723                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2724                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2725                         bnx2x_panic();
2726
2727                 } else if (attn & BNX2X_MCP_ASSERT) {
2728
2729                         BNX2X_ERR("MCP assert!\n");
2730                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2731                         bnx2x_fw_dump(bp);
2732
2733                 } else
2734                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2735         }
2736
2737         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2738                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2739                 if (attn & BNX2X_GRC_TIMEOUT) {
2740                         val = CHIP_IS_E1H(bp) ?
2741                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2742                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2743                 }
2744                 if (attn & BNX2X_GRC_RSV) {
2745                         val = CHIP_IS_E1H(bp) ?
2746                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2747                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2748                 }
2749                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2750         }
2751 }
2752
2753 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2754 {
2755         struct attn_route attn;
2756         struct attn_route group_mask;
2757         int port = BP_PORT(bp);
2758         int index;
2759         u32 reg_addr;
2760         u32 val;
2761         u32 aeu_mask;
2762
2763         /* need to take HW lock because MCP or other port might also
2764            try to handle this event */
2765         bnx2x_acquire_alr(bp);
2766
2767         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2768         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2769         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2770         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2771         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2772            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2773
2774         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2775                 if (deasserted & (1 << index)) {
2776                         group_mask = bp->attn_group[index];
2777
2778                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2779                            index, group_mask.sig[0], group_mask.sig[1],
2780                            group_mask.sig[2], group_mask.sig[3]);
2781
2782                         bnx2x_attn_int_deasserted3(bp,
2783                                         attn.sig[3] & group_mask.sig[3]);
2784                         bnx2x_attn_int_deasserted1(bp,
2785                                         attn.sig[1] & group_mask.sig[1]);
2786                         bnx2x_attn_int_deasserted2(bp,
2787                                         attn.sig[2] & group_mask.sig[2]);
2788                         bnx2x_attn_int_deasserted0(bp,
2789                                         attn.sig[0] & group_mask.sig[0]);
2790
2791                         if ((attn.sig[0] & group_mask.sig[0] &
2792                                                 HW_PRTY_ASSERT_SET_0) ||
2793                             (attn.sig[1] & group_mask.sig[1] &
2794                                                 HW_PRTY_ASSERT_SET_1) ||
2795                             (attn.sig[2] & group_mask.sig[2] &
2796                                                 HW_PRTY_ASSERT_SET_2))
2797                                 BNX2X_ERR("FATAL HW block parity attention\n");
2798                 }
2799         }
2800
2801         bnx2x_release_alr(bp);
2802
2803         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2804
2805         val = ~deasserted;
2806         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2807            val, reg_addr);
2808         REG_WR(bp, reg_addr, val);
2809
2810         if (~bp->attn_state & deasserted)
2811                 BNX2X_ERR("IGU ERROR\n");
2812
2813         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2814                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2815
2816         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2817         aeu_mask = REG_RD(bp, reg_addr);
2818
2819         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2820            aeu_mask, deasserted);
2821         aeu_mask |= (deasserted & 0xff);
2822         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2823
2824         REG_WR(bp, reg_addr, aeu_mask);
2825         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2826
2827         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2828         bp->attn_state &= ~deasserted;
2829         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2830 }
2831
2832 static void bnx2x_attn_int(struct bnx2x *bp)
2833 {
2834         /* read local copy of bits */
2835         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2836                                                                 attn_bits);
2837         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2838                                                                 attn_bits_ack);
2839         u32 attn_state = bp->attn_state;
2840
2841         /* look for changed bits */
2842         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2843         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2844
2845         DP(NETIF_MSG_HW,
2846            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2847            attn_bits, attn_ack, asserted, deasserted);
2848
2849         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2850                 BNX2X_ERR("BAD attention state\n");
2851
2852         /* handle bits that were raised */
2853         if (asserted)
2854                 bnx2x_attn_int_asserted(bp, asserted);
2855
2856         if (deasserted)
2857                 bnx2x_attn_int_deasserted(bp, deasserted);
2858 }
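
/*
 * A worked example of the derivation above (illustrative): for a given
 * attention bit, attn_bits = 1, attn_ack = 0, attn_state = 0 means the
 * attention was just raised and not yet acknowledged -> 'asserted'.
 * attn_bits = 0, attn_ack = 1, attn_state = 1 means it was raised,
 * acknowledged and has since cleared -> 'deasserted'.  A bit where
 * attn_bits == attn_ack while attn_bits != attn_state cannot be reached
 * by this protocol, which is exactly what the "BAD attention state"
 * check flags.
 */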
2859
2860 static void bnx2x_sp_task(struct work_struct *work)
2861 {
2862         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2863         u16 status;
2864
2866         /* Return here if interrupt is disabled */
2867         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2869                 return;
2870         }
2871
2872         status = bnx2x_update_dsb_idx(bp);
2873 /*      if (status == 0)                                     */
2874 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2875
2876         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2877
2878         /* HW attentions */
2879         if (status & 0x1)
2880                 bnx2x_attn_int(bp);
2881
2882         /* CStorm events: query_stats, port delete ramrod */
2883         if (status & 0x2)
2884                 bp->stats_pending = 0;
2885
2886         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2887                      IGU_INT_NOP, 1);
2888         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2889                      IGU_INT_NOP, 1);
2890         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2891                      IGU_INT_NOP, 1);
2892         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2893                      IGU_INT_NOP, 1);
2894         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2895                      IGU_INT_ENABLE, 1);
2896
2897 }
2898
2899 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2900 {
2901         struct net_device *dev = dev_instance;
2902         struct bnx2x *bp = netdev_priv(dev);
2903
2904         /* Return here if interrupt is disabled */
2905         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2906                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2907                 return IRQ_HANDLED;
2908         }
2909
2910         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2911
2912 #ifdef BNX2X_STOP_ON_ERROR
2913         if (unlikely(bp->panic))
2914                 return IRQ_HANDLED;
2915 #endif
2916
2917         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2918
2919         return IRQ_HANDLED;
2920 }
2921
2922 /* end of slow path */
2923
2924 /* Statistics */
2925
2926 /****************************************************************************
2927 * Macros
2928 ****************************************************************************/
2929
2930 /* sum[hi:lo] += add[hi:lo] */
2931 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2932         do { \
2933                 s_lo += a_lo; \
2934                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2935         } while (0)
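
/*
 * Example (illustrative): with sum = 0x00000001_ffffffff and
 * add = 0x00000000_00000002, s_lo wraps to 0x00000001; since
 * s_lo < a_lo a carry of 1 is folded into s_hi, giving
 * 0x00000002_00000001 as expected.
 */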
2936
2937 /* difference = minuend - subtrahend */
2938 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2939         do { \
2940                 if (m_lo < s_lo) { \
2941                         /* underflow */ \
2942                         d_hi = m_hi - s_hi; \
2943                         if (d_hi > 0) { \
2944                                 /* we can borrow 1 */ \
2945                                 d_hi--; \
2946                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2947                         } else { \
2948                                 /* m_hi <= s_hi */ \
2949                                 d_hi = 0; \
2950                                 d_lo = 0; \
2951                         } \
2952                 } else { \
2953                         /* m_lo >= s_lo */ \
2954                         if (m_hi < s_hi) { \
2955                                 d_hi = 0; \
2956                                 d_lo = 0; \
2957                         } else { \
2958                                 /* m_hi >= s_hi */ \
2959                                 d_hi = m_hi - s_hi; \
2960                                 d_lo = m_lo - s_lo; \
2961                         } \
2962                 } \
2963         } while (0)
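
/*
 * Example (illustrative): minuend = 0x00000002_00000001,
 * subtrahend = 0x00000001_ffffffff.  m_lo < s_lo and d_hi = 2 - 1 > 0,
 * so one is borrowed from the high word: d_hi = 0 and
 * d_lo = 1 + (0xffffffff - 0xffffffff) + 1 = 2, the correct 64-bit
 * difference.  If the subtrahend is larger than the minuend the result
 * is clamped to 0 rather than allowed to underflow.
 */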
2964
2965 #define UPDATE_STAT64(s, t) \
2966         do { \
2967                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2968                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2969                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2970                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2971                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2972                        pstats->mac_stx[1].t##_lo, diff.lo); \
2973         } while (0)
2974
2975 #define UPDATE_STAT64_NIG(s, t) \
2976         do { \
2977                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2978                         diff.lo, new->s##_lo, old->s##_lo); \
2979                 ADD_64(estats->t##_hi, diff.hi, \
2980                        estats->t##_lo, diff.lo); \
2981         } while (0)
2982
2983 /* sum[hi:lo] += add */
2984 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2985         do { \
2986                 s_lo += a; \
2987                 s_hi += (s_lo < a) ? 1 : 0; \
2988         } while (0)
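
/*
 * ADD_EXTEND_64 folds a 32-bit value into a 64-bit accumulator using
 * the same wrap-around carry test as ADD_64: after s_lo += a, the sum
 * wrapped iff s_lo < a.  E.g. s = 0x00000000_fffffffe, a = 5 gives
 * s_lo = 3 < 5, so s_hi becomes 1 and s = 0x00000001_00000003.
 */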
2989
2990 #define UPDATE_EXTEND_STAT(s) \
2991         do { \
2992                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2993                               pstats->mac_stx[1].s##_lo, \
2994                               new->s); \
2995         } while (0)
2996
2997 #define UPDATE_EXTEND_TSTAT(s, t) \
2998         do { \
2999                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3000                 old_tclient->s = le32_to_cpu(tclient->s); \
3001                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3002         } while (0)
3003
3004 #define UPDATE_EXTEND_XSTAT(s, t) \
3005         do { \
3006                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007                 old_xclient->s = le32_to_cpu(xclient->s); \
3008                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3009         } while (0)
3010
3011 /*
3012  * General service functions
3013  */
3014
3015 static inline long bnx2x_hilo(u32 *hiref)
3016 {
3017         u32 lo = *(hiref + 1);
3018 #if (BITS_PER_LONG == 64)
3019         u32 hi = *hiref;
3020
3021         return HILO_U64(hi, lo);
3022 #else
3023         return lo;
3024 #endif
3025 }
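
/*
 * Note: hiref points at the _hi word of a {hi, lo} pair, hence the
 * *(hiref + 1) for the low word.  On a 32-bit kernel 'long' cannot
 * hold the full 64-bit value, so only the low 32 bits are returned
 * there.
 */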
3026
3027 /*
3028  * Init service functions
3029  */
3030
3031 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3032 {
3033         if (!bp->stats_pending) {
3034                 struct eth_query_ramrod_data ramrod_data = {0};
3035                 int rc;
3036
3037                 ramrod_data.drv_counter = bp->stats_counter++;
3038                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3039                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3040
3041                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3042                                    ((u32 *)&ramrod_data)[1],
3043                                    ((u32 *)&ramrod_data)[0], 0);
3044                 if (rc == 0) {
3045                         /* stats ramrod has its own slot on the spq */
3046                         bp->spq_left++;
3047                         bp->stats_pending = 1;
3048                 }
3049         }
3050 }
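
/*
 * The drv_counter posted in the ramrod is echoed back by the storms in
 * each per-client stats_counter field; bnx2x_storm_stats_update() below
 * accepts the storm buffers only when the echoed value plus one equals
 * bp->stats_counter (the counter is post-incremented here), which
 * filters out stale query completions.
 */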
3051
3052 static void bnx2x_stats_init(struct bnx2x *bp)
3053 {
3054         int port = BP_PORT(bp);
3055
3056         bp->executer_idx = 0;
3057         bp->stats_counter = 0;
3058
3059         /* port stats */
3060         if (!BP_NOMCP(bp))
3061                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3062         else
3063                 bp->port.port_stx = 0;
3064         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3065
3066         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3067         bp->port.old_nig_stats.brb_discard =
3068                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3069         bp->port.old_nig_stats.brb_truncate =
3070                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3071         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3072                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3073         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3074                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3075
3076         /* function stats */
3077         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3078         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3079         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3080         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3081
3082         bp->stats_state = STATS_STATE_DISABLED;
3083         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3084                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3085 }
3086
3087 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3088 {
3089         struct dmae_command *dmae = &bp->stats_dmae;
3090         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3091
3092         *stats_comp = DMAE_COMP_VAL;
3093
3094         /* loader */
3095         if (bp->executer_idx) {
3096                 int loader_idx = PMF_DMAE_C(bp);
3097
3098                 memset(dmae, 0, sizeof(struct dmae_command));
3099
3100                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3101                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3102                                 DMAE_CMD_DST_RESET |
3103 #ifdef __BIG_ENDIAN
3104                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3105 #else
3106                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3107 #endif
3108                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3109                                                DMAE_CMD_PORT_0) |
3110                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3111                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3112                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3113                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3114                                      sizeof(struct dmae_command) *
3115                                      (loader_idx + 1)) >> 2;
3116                 dmae->dst_addr_hi = 0;
3117                 dmae->len = sizeof(struct dmae_command) >> 2;
3118                 if (CHIP_IS_E1(bp))
3119                         dmae->len--;
3120                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3121                 dmae->comp_addr_hi = 0;
3122                 dmae->comp_val = 1;
3123
3124                 *stats_comp = 0;
3125                 bnx2x_post_dmae(bp, dmae, loader_idx);
3126
3127         } else if (bp->func_stx) {
3128                 *stats_comp = 0;
3129                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3130         }
3131 }
3132
3133 static int bnx2x_stats_comp(struct bnx2x *bp)
3134 {
3135         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3136         int cnt = 10;
3137
3138         might_sleep();
3139         while (*stats_comp != DMAE_COMP_VAL) {
3140                 if (!cnt) {
3141                         BNX2X_ERR("timeout waiting for stats to finish\n");
3142                         break;
3143                 }
3144                 cnt--;
3145                 msleep(1);
3146         }
3147         return 1;
3148 }
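
/*
 * Note that bnx2x_stats_comp() always returns 1: the wait is
 * best-effort (up to ~10ms in 1ms steps) and a timeout is only
 * logged, never propagated to the caller.
 */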
3149
3150 /*
3151  * Statistics service functions
3152  */
3153
3154 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3155 {
3156         struct dmae_command *dmae;
3157         u32 opcode;
3158         int loader_idx = PMF_DMAE_C(bp);
3159         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3160
3161         /* sanity */
3162         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3163                 BNX2X_ERR("BUG!\n");
3164                 return;
3165         }
3166
3167         bp->executer_idx = 0;
3168
3169         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3170                   DMAE_CMD_C_ENABLE |
3171                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3172 #ifdef __BIG_ENDIAN
3173                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3174 #else
3175                   DMAE_CMD_ENDIANITY_DW_SWAP |
3176 #endif
3177                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3178                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3179
3180         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3182         dmae->src_addr_lo = bp->port.port_stx >> 2;
3183         dmae->src_addr_hi = 0;
3184         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3185         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3186         dmae->len = DMAE_LEN32_RD_MAX;
3187         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3188         dmae->comp_addr_hi = 0;
3189         dmae->comp_val = 1;
3190
3191         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3192         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3193         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3194         dmae->src_addr_hi = 0;
3195         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3196                                    DMAE_LEN32_RD_MAX * 4);
3197         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3198                                    DMAE_LEN32_RD_MAX * 4);
3199         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3200         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3201         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3202         dmae->comp_val = DMAE_COMP_VAL;
3203
3204         *stats_comp = 0;
3205         bnx2x_hw_stats_post(bp);
3206         bnx2x_stats_comp(bp);
3207 }
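
/*
 * A single DMAE read is capped at DMAE_LEN32_RD_MAX dwords, which is
 * why the port statistics block is fetched in two chained commands
 * above; only the second command completes into stats_comp with
 * DMAE_COMP_VAL, the value bnx2x_stats_comp() polls for.
 */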
3208
3209 static void bnx2x_port_stats_init(struct bnx2x *bp)
3210 {
3211         struct dmae_command *dmae;
3212         int port = BP_PORT(bp);
3213         int vn = BP_E1HVN(bp);
3214         u32 opcode;
3215         int loader_idx = PMF_DMAE_C(bp);
3216         u32 mac_addr;
3217         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3218
3219         /* sanity */
3220         if (!bp->link_vars.link_up || !bp->port.pmf) {
3221                 BNX2X_ERR("BUG!\n");
3222                 return;
3223         }
3224
3225         bp->executer_idx = 0;
3226
3227         /* MCP */
3228         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3229                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3230                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3231 #ifdef __BIG_ENDIAN
3232                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3233 #else
3234                   DMAE_CMD_ENDIANITY_DW_SWAP |
3235 #endif
3236                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3237                   (vn << DMAE_CMD_E1HVN_SHIFT));
3238
3239         if (bp->port.port_stx) {
3240
3241                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242                 dmae->opcode = opcode;
3243                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3244                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3245                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3246                 dmae->dst_addr_hi = 0;
3247                 dmae->len = sizeof(struct host_port_stats) >> 2;
3248                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3249                 dmae->comp_addr_hi = 0;
3250                 dmae->comp_val = 1;
3251         }
3252
3253         if (bp->func_stx) {
3254
3255                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3256                 dmae->opcode = opcode;
3257                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3258                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3259                 dmae->dst_addr_lo = bp->func_stx >> 2;
3260                 dmae->dst_addr_hi = 0;
3261                 dmae->len = sizeof(struct host_func_stats) >> 2;
3262                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3263                 dmae->comp_addr_hi = 0;
3264                 dmae->comp_val = 1;
3265         }
3266
3267         /* MAC */
3268         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3269                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3270                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3271 #ifdef __BIG_ENDIAN
3272                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3273 #else
3274                   DMAE_CMD_ENDIANITY_DW_SWAP |
3275 #endif
3276                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3277                   (vn << DMAE_CMD_E1HVN_SHIFT));
3278
3279         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3280
3281                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3282                                    NIG_REG_INGRESS_BMAC0_MEM);
3283
3284                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3285                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3286                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3287                 dmae->opcode = opcode;
3288                 dmae->src_addr_lo = (mac_addr +
3289                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3290                 dmae->src_addr_hi = 0;
3291                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3292                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3293                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3294                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3295                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296                 dmae->comp_addr_hi = 0;
3297                 dmae->comp_val = 1;
3298
3299                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3300                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3301                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3302                 dmae->opcode = opcode;
3303                 dmae->src_addr_lo = (mac_addr +
3304                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3305                 dmae->src_addr_hi = 0;
3306                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3307                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3308                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3309                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3310                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3311                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3312                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313                 dmae->comp_addr_hi = 0;
3314                 dmae->comp_val = 1;
3315
3316         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3317
3318                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3319
3320                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3321                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322                 dmae->opcode = opcode;
3323                 dmae->src_addr_lo = (mac_addr +
3324                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3325                 dmae->src_addr_hi = 0;
3326                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3327                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3328                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3329                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3330                 dmae->comp_addr_hi = 0;
3331                 dmae->comp_val = 1;
3332
3333                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3334                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335                 dmae->opcode = opcode;
3336                 dmae->src_addr_lo = (mac_addr +
3337                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3338                 dmae->src_addr_hi = 0;
3339                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3340                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3341                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3342                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3343                 dmae->len = 1;
3344                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3345                 dmae->comp_addr_hi = 0;
3346                 dmae->comp_val = 1;
3347
3348                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3349                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3350                 dmae->opcode = opcode;
3351                 dmae->src_addr_lo = (mac_addr +
3352                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3353                 dmae->src_addr_hi = 0;
3354                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3355                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3356                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3357                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3358                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3359                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3360                 dmae->comp_addr_hi = 0;
3361                 dmae->comp_val = 1;
3362         }
3363
3364         /* NIG */
3365         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366         dmae->opcode = opcode;
3367         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3368                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3369         dmae->src_addr_hi = 0;
3370         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3371         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3372         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3373         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3374         dmae->comp_addr_hi = 0;
3375         dmae->comp_val = 1;
3376
3377         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3378         dmae->opcode = opcode;
3379         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3380                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3381         dmae->src_addr_hi = 0;
3382         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3383                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3384         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3385                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3386         dmae->len = (2*sizeof(u32)) >> 2;
3387         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3388         dmae->comp_addr_hi = 0;
3389         dmae->comp_val = 1;
3390
3391         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3392         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3393                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3394                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3395 #ifdef __BIG_ENDIAN
3396                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3397 #else
3398                         DMAE_CMD_ENDIANITY_DW_SWAP |
3399 #endif
3400                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3401                         (vn << DMAE_CMD_E1HVN_SHIFT));
3402         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3403                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3404         dmae->src_addr_hi = 0;
3405         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3406                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3407         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3408                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3409         dmae->len = (2*sizeof(u32)) >> 2;
3410         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3411         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3412         dmae->comp_val = DMAE_COMP_VAL;
3413
3414         *stats_comp = 0;
3415 }
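
/*
 * Every command built above except the last completes into a DMAE GO
 * register (comp_val = 1), kicking the next command in the chain; the
 * final NIG read completes into stats_comp with DMAE_COMP_VAL, marking
 * the whole statistics snapshot as finished.
 */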
3416
3417 static void bnx2x_func_stats_init(struct bnx2x *bp)
3418 {
3419         struct dmae_command *dmae = &bp->stats_dmae;
3420         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3421
3422         /* sanity */
3423         if (!bp->func_stx) {
3424                 BNX2X_ERR("BUG!\n");
3425                 return;
3426         }
3427
3428         bp->executer_idx = 0;
3429         memset(dmae, 0, sizeof(struct dmae_command));
3430
3431         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3432                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3433                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3434 #ifdef __BIG_ENDIAN
3435                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3436 #else
3437                         DMAE_CMD_ENDIANITY_DW_SWAP |
3438 #endif
3439                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3440                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3441         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3442         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3443         dmae->dst_addr_lo = bp->func_stx >> 2;
3444         dmae->dst_addr_hi = 0;
3445         dmae->len = sizeof(struct host_func_stats) >> 2;
3446         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3447         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3448         dmae->comp_val = DMAE_COMP_VAL;
3449
3450         *stats_comp = 0;
3451 }
3452
3453 static void bnx2x_stats_start(struct bnx2x *bp)
3454 {
3455         if (bp->port.pmf)
3456                 bnx2x_port_stats_init(bp);
3457
3458         else if (bp->func_stx)
3459                 bnx2x_func_stats_init(bp);
3460
3461         bnx2x_hw_stats_post(bp);
3462         bnx2x_storm_stats_post(bp);
3463 }
3464
3465 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3466 {
3467         bnx2x_stats_comp(bp);
3468         bnx2x_stats_pmf_update(bp);
3469         bnx2x_stats_start(bp);
3470 }
3471
3472 static void bnx2x_stats_restart(struct bnx2x *bp)
3473 {
3474         bnx2x_stats_comp(bp);
3475         bnx2x_stats_start(bp);
3476 }
3477
3478 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3479 {
3480         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3481         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482         struct regpair diff;
3483
3484         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3485         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3486         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3487         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3488         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3489         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3490         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3491         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3492         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3493         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3494         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3495         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3496         UPDATE_STAT64(tx_stat_gt127,
3497                                 tx_stat_etherstatspkts65octetsto127octets);
3498         UPDATE_STAT64(tx_stat_gt255,
3499                                 tx_stat_etherstatspkts128octetsto255octets);
3500         UPDATE_STAT64(tx_stat_gt511,
3501                                 tx_stat_etherstatspkts256octetsto511octets);
3502         UPDATE_STAT64(tx_stat_gt1023,
3503                                 tx_stat_etherstatspkts512octetsto1023octets);
3504         UPDATE_STAT64(tx_stat_gt1518,
3505                                 tx_stat_etherstatspkts1024octetsto1522octets);
3506         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3507         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3508         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3509         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3510         UPDATE_STAT64(tx_stat_gterr,
3511                                 tx_stat_dot3statsinternalmactransmiterrors);
3512         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3513 }
3514
3515 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3516 {
3517         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3518         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3519
3520         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3521         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3522         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3523         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3524         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3525         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3526         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3527         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3528         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3529         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3530         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3531         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3532         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3533         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3534         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3535         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3536         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3537         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3538         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3539         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3540         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3541         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3542         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3543         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3544         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3545         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3546         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3547         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3548         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3549         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3550         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3551 }
3552
3553 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3554 {
3555         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3556         struct nig_stats *old = &(bp->port.old_nig_stats);
3557         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3558         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3559         struct regpair diff;
3560
3561         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3562                 bnx2x_bmac_stats_update(bp);
3563
3564         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3565                 bnx2x_emac_stats_update(bp);
3566
3567         else { /* unreached */
3568                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3569                 return -1;
3570         }
3571
3572         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3573                       new->brb_discard - old->brb_discard);
3574         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3575                       new->brb_truncate - old->brb_truncate);
3576
3577         UPDATE_STAT64_NIG(egress_mac_pkt0,
3578                                         etherstatspkts1024octetsto1522octets);
3579         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3580
3581         memcpy(old, new, sizeof(struct nig_stats));
3582
3583         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3584                sizeof(struct mac_stx));
3585         estats->brb_drop_hi = pstats->brb_drop_hi;
3586         estats->brb_drop_lo = pstats->brb_drop_lo;
3587
3588         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3589
3590         return 0;
3591 }
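
/*
 * The host_port_stats_start/end pair presumably serves as a sequence
 * fence: the end counter is bumped and copied into the start field
 * after every update, so a consumer that reads start != end can tell
 * it raced with a DMAE write of this block.
 */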
3592
3593 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3594 {
3595         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3596         int cl_id = BP_CL_ID(bp);
3597         struct tstorm_per_port_stats *tport =
3598                                 &stats->tstorm_common.port_statistics;
3599         struct tstorm_per_client_stats *tclient =
3600                         &stats->tstorm_common.client_statistics[cl_id];
3601         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3602         struct xstorm_per_client_stats *xclient =
3603                         &stats->xstorm_common.client_statistics[cl_id];
3604         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3605         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3606         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3607         u32 diff;
3608
3609         /* are storm stats valid? */
3610         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3611                                                         bp->stats_counter) {
3612                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3613                    "  tstorm counter (%d) != stats_counter (%d)\n",
3614                    tclient->stats_counter, bp->stats_counter);
3615                 return -1;
3616         }
3617         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3618                                                         bp->stats_counter) {
3619                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3620                    "  xstorm counter (%d) != stats_counter (%d)\n",
3621                    xclient->stats_counter, bp->stats_counter);
3622                 return -2;
3623         }
3624
3625         fstats->total_bytes_received_hi =
3626         fstats->valid_bytes_received_hi =
3627                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3628         fstats->total_bytes_received_lo =
3629         fstats->valid_bytes_received_lo =
3630                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3631
3632         estats->error_bytes_received_hi =
3633                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3634         estats->error_bytes_received_lo =
3635                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3636         ADD_64(estats->error_bytes_received_hi,
3637                estats->rx_stat_ifhcinbadoctets_hi,
3638                estats->error_bytes_received_lo,
3639                estats->rx_stat_ifhcinbadoctets_lo);
3640
3641         ADD_64(fstats->total_bytes_received_hi,
3642                estats->error_bytes_received_hi,
3643                fstats->total_bytes_received_lo,
3644                estats->error_bytes_received_lo);
3645
3646         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3647         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3648                                 total_multicast_packets_received);
3649         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3650                                 total_broadcast_packets_received);
3651
3652         fstats->total_bytes_transmitted_hi =
3653                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3654         fstats->total_bytes_transmitted_lo =
3655                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3656
3657         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3658                                 total_unicast_packets_transmitted);
3659         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3660                                 total_multicast_packets_transmitted);
3661         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3662                                 total_broadcast_packets_transmitted);
3663
3664         memcpy(estats, &(fstats->total_bytes_received_hi),
3665                sizeof(struct host_func_stats) - 2*sizeof(u32));
3666
3667         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3668         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3669         estats->brb_truncate_discard =
3670                                 le32_to_cpu(tport->brb_truncate_discard);
3671         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3672
3673         old_tclient->rcv_unicast_bytes.hi =
3674                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3675         old_tclient->rcv_unicast_bytes.lo =
3676                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3677         old_tclient->rcv_broadcast_bytes.hi =
3678                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3679         old_tclient->rcv_broadcast_bytes.lo =
3680                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3681         old_tclient->rcv_multicast_bytes.hi =
3682                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3683         old_tclient->rcv_multicast_bytes.lo =
3684                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3685         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3686
3687         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3688         old_tclient->packets_too_big_discard =
3689                                 le32_to_cpu(tclient->packets_too_big_discard);
3690         estats->no_buff_discard =
3691         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3692         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3693
3694         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3695         old_xclient->unicast_bytes_sent.hi =
3696                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3697         old_xclient->unicast_bytes_sent.lo =
3698                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3699         old_xclient->multicast_bytes_sent.hi =
3700                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3701         old_xclient->multicast_bytes_sent.lo =
3702                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3703         old_xclient->broadcast_bytes_sent.hi =
3704                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3705         old_xclient->broadcast_bytes_sent.lo =
3706                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3707
3708         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3709
3710         return 0;
3711 }
3712
3713 static void bnx2x_net_stats_update(struct bnx2x *bp)
3714 {
3715         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3716         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3717         struct net_device_stats *nstats = &bp->dev->stats;
3718
3719         nstats->rx_packets =
3720                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3721                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3722                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3723
3724         nstats->tx_packets =
3725                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3726                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3727                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3728
3729         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3730
3731         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3732
3733         nstats->rx_dropped = old_tclient->checksum_discard +
3734                              estats->mac_discard;
3735         nstats->tx_dropped = 0;
3736
3737         nstats->multicast =
3738                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3739
3740         nstats->collisions =
3741                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3742                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3743                         estats->tx_stat_dot3statslatecollisions_lo +
3744                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3745
3746         estats->jabber_packets_received =
3747                                 old_tclient->packets_too_big_discard +
3748                                 estats->rx_stat_dot3statsframestoolong_lo;
3749
3750         nstats->rx_length_errors =
3751                                 estats->rx_stat_etherstatsundersizepkts_lo +
3752                                 estats->jabber_packets_received;
3753         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3754         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3755         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3756         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3757         nstats->rx_missed_errors = estats->xxoverflow_discard;
3758
3759         nstats->rx_errors = nstats->rx_length_errors +
3760                             nstats->rx_over_errors +
3761                             nstats->rx_crc_errors +
3762                             nstats->rx_frame_errors +
3763                             nstats->rx_fifo_errors +
3764                             nstats->rx_missed_errors;
3765
3766         nstats->tx_aborted_errors =
3767                         estats->tx_stat_dot3statslatecollisions_lo +
3768                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3769         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3770         nstats->tx_fifo_errors = 0;
3771         nstats->tx_heartbeat_errors = 0;
3772         nstats->tx_window_errors = 0;
3773
3774         nstats->tx_errors = nstats->tx_aborted_errors +
3775                             nstats->tx_carrier_errors;
3776 }
3777
3778 static void bnx2x_stats_update(struct bnx2x *bp)
3779 {
3780         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3781         int update = 0;
3782
3783         if (*stats_comp != DMAE_COMP_VAL)
3784                 return;
3785
3786         if (bp->port.pmf)
3787                 update = (bnx2x_hw_stats_update(bp) == 0);
3788
3789         update |= (bnx2x_storm_stats_update(bp) == 0);
3790
3791         if (update)
3792                 bnx2x_net_stats_update(bp);
3793
3794         else {
3795                 if (bp->stats_pending) {
3796                         bp->stats_pending++;
3797                         if (bp->stats_pending == 3) {
3798                                 BNX2X_ERR("stats not updated 3 times in a row\n");
3799                                 bnx2x_panic();
3800                                 return;
3801                         }
3802                 }
3803         }
3804
3805         if (bp->msglevel & NETIF_MSG_TIMER) {
3806                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3807                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3808                 struct net_device_stats *nstats = &bp->dev->stats;
3809                 int i;
3810
3811                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3812                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3813                                   "  tx pkt (%lx)\n",
3814                        bnx2x_tx_avail(bp->fp),
3815                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3816                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3817                                   "  rx pkt (%lx)\n",
3818                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3819                              bp->fp->rx_comp_cons),
3820                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3821                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3822                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3823                        estats->driver_xoff, estats->brb_drop_lo);
3824                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3825                         "packets_too_big_discard %u  no_buff_discard %u  "
3826                         "mac_discard %u  mac_filter_discard %u  "
3827                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3828                         "ttl0_discard %u\n",
3829                        old_tclient->checksum_discard,
3830                        old_tclient->packets_too_big_discard,
3831                        old_tclient->no_buff_discard, estats->mac_discard,
3832                        estats->mac_filter_discard, estats->xxoverflow_discard,
3833                        estats->brb_truncate_discard,
3834                        old_tclient->ttl0_discard);
3835
3836                 for_each_queue(bp, i) {
3837                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3838                                bnx2x_fp(bp, i, tx_pkt),
3839                                bnx2x_fp(bp, i, rx_pkt),
3840                                bnx2x_fp(bp, i, rx_calls));
3841                 }
3842         }
3843
3844         bnx2x_hw_stats_post(bp);
3845         bnx2x_storm_stats_post(bp);
3846 }
3847
3848 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3849 {
3850         struct dmae_command *dmae;
3851         u32 opcode;
3852         int loader_idx = PMF_DMAE_C(bp);
3853         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3854
3855         bp->executer_idx = 0;
3856
3857         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3858                   DMAE_CMD_C_ENABLE |
3859                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3860 #ifdef __BIG_ENDIAN
3861                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3862 #else
3863                   DMAE_CMD_ENDIANITY_DW_SWAP |
3864 #endif
3865                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3866                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3867
3868         if (bp->port.port_stx) {
3869
3870                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3871                 if (bp->func_stx)
3872                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3873                 else
3874                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3875                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3876                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3877                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3878                 dmae->dst_addr_hi = 0;
3879                 dmae->len = sizeof(struct host_port_stats) >> 2;
3880                 if (bp->func_stx) {
3881                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3882                         dmae->comp_addr_hi = 0;
3883                         dmae->comp_val = 1;
3884                 } else {
3885                         dmae->comp_addr_lo =
3886                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3887                         dmae->comp_addr_hi =
3888                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3889                         dmae->comp_val = DMAE_COMP_VAL;
3890
3891                         *stats_comp = 0;
3892                 }
3893         }
3894
3895         if (bp->func_stx) {
3896
3897                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3898                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3899                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3900                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3901                 dmae->dst_addr_lo = bp->func_stx >> 2;
3902                 dmae->dst_addr_hi = 0;
3903                 dmae->len = sizeof(struct host_func_stats) >> 2;
3904                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3905                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3906                 dmae->comp_val = DMAE_COMP_VAL;
3907
3908                 *stats_comp = 0;
3909         }
3910 }
3911
3912 static void bnx2x_stats_stop(struct bnx2x *bp)
3913 {
3914         int update = 0;
3915
3916         bnx2x_stats_comp(bp);
3917
3918         if (bp->port.pmf)
3919                 update = (bnx2x_hw_stats_update(bp) == 0);
3920
3921         update |= (bnx2x_storm_stats_update(bp) == 0);
3922
3923         if (update) {
3924                 bnx2x_net_stats_update(bp);
3925
3926                 if (bp->port.pmf)
3927                         bnx2x_port_stats_stop(bp);
3928
3929                 bnx2x_hw_stats_post(bp);
3930                 bnx2x_stats_comp(bp);
3931         }
3932 }
3933
3934 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3935 {
3936 }
3937
3938 static const struct {
3939         void (*action)(struct bnx2x *bp);
3940         enum bnx2x_stats_state next_state;
3941 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3942 /* state        event   */
3943 {
3944 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3945 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3946 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3947 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3948 },
3949 {
3950 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3951 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3952 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3953 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3954 }
3955 };
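
/*
 * Example walk through the state machine: while DISABLED, a LINK_UP
 * event runs bnx2x_stats_start() and moves to ENABLED; a later STOP
 * event runs bnx2x_stats_stop() and drops back to DISABLED.  UPDATE
 * events are ignored (bnx2x_stats_do_nothing) while DISABLED.
 */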
3956
3957 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3958 {
3959         enum bnx2x_stats_state state = bp->stats_state;
3960
3961         bnx2x_stats_stm[state][event].action(bp);
3962         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3963
3964         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3965                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3966                    state, event, bp->stats_state);
3967 }
3968
3969 static void bnx2x_timer(unsigned long data)
3970 {
3971         struct bnx2x *bp = (struct bnx2x *) data;
3972
3973         if (!netif_running(bp->dev))
3974                 return;
3975
3976         if (atomic_read(&bp->intr_sem) != 0)
3977                 goto timer_restart;
3978
3979         if (poll) {
3980                 struct bnx2x_fastpath *fp = &bp->fp[0];
3981                 int rc;
3982
3983                 bnx2x_tx_int(fp, 1000);
3984                 rc = bnx2x_rx_int(fp, 1000);
3985         }
3986
3987         if (!BP_NOMCP(bp)) {
3988                 int func = BP_FUNC(bp);
3989                 u32 drv_pulse;
3990                 u32 mcp_pulse;
3991
3992                 ++bp->fw_drv_pulse_wr_seq;
3993                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3994                 /* TBD - add SYSTEM_TIME */
3995                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3996                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3997
3998                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3999                              MCP_PULSE_SEQ_MASK);
4000                 /* The delta between driver pulse and mcp response
4001                  * should be 1 (before mcp response) or 0 (after mcp response)
4002                  */
4003                 if ((drv_pulse != mcp_pulse) &&
4004                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4005                         /* someone lost a heartbeat... */
4006                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4007                                   drv_pulse, mcp_pulse);
4008                 }
4009         }
4010
4011         if ((bp->state == BNX2X_STATE_OPEN) ||
4012             (bp->state == BNX2X_STATE_DISABLED))
4013                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4014
4015 timer_restart:
4016         mod_timer(&bp->timer, jiffies + bp->current_interval);
4017 }
4018
4019 /* end of Statistics */
4020
4021 /* nic init */
4022
4023 /*
4024  * nic init service functions
4025  */
4026
4027 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4028 {
4029         int port = BP_PORT(bp);
4030
4031         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4032                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4033                         sizeof(struct ustorm_status_block)/4);
4034         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4035                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4036                         sizeof(struct cstorm_status_block)/4);
4037 }
4038
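/* Program a per-queue host status block: write its DMA address
 * and owning function into the USTORM and CSTORM internal
 * memories, start with all host-coalescing indices disabled
 * (bnx2x_update_coalesce() enables the ones actually used) and
 * finally ACK the IGU to enable interrupts for this sb_id.
 */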
4039 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4040                           dma_addr_t mapping, int sb_id)
4041 {
4042         int port = BP_PORT(bp);
4043         int func = BP_FUNC(bp);
4044         int index;
4045         u64 section;
4046
4047         /* USTORM */
4048         section = ((u64)mapping) + offsetof(struct host_status_block,
4049                                             u_status_block);
4050         sb->u_status_block.status_block_id = sb_id;
4051
4052         REG_WR(bp, BAR_USTRORM_INTMEM +
4053                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4054         REG_WR(bp, BAR_USTRORM_INTMEM +
4055                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4056                U64_HI(section));
4057         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4058                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4059
4060         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4061                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4062                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4063
4064         /* CSTORM */
4065         section = ((u64)mapping) + offsetof(struct host_status_block,
4066                                             c_status_block);
4067         sb->c_status_block.status_block_id = sb_id;
4068
4069         REG_WR(bp, BAR_CSTRORM_INTMEM +
4070                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4071         REG_WR(bp, BAR_CSTRORM_INTMEM +
4072                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4073                U64_HI(section));
4074         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4075                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4076
4077         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4078                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4079                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4080
4081         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4082 }
4083
4084 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4085 {
4086         int func = BP_FUNC(bp);
4087
4088         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4089                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4090                         sizeof(struct ustorm_def_status_block)/4);
4091         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4092                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4093                         sizeof(struct cstorm_def_status_block)/4);
4094         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4095                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4096                         sizeof(struct xstorm_def_status_block)/4);
4097         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4098                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4099                         sizeof(struct tstorm_def_status_block)/4);
4100 }
4101
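/* Program the default status block: register the attention
 * section with the HC (the AEU enable signals are cached in
 * bp->attn_group[]), then give each storm the DMA address of its
 * own index section, again with host coalescing disabled by
 * default, and ACK the IGU for this sb_id.
 */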
4102 static void bnx2x_init_def_sb(struct bnx2x *bp,
4103                               struct host_def_status_block *def_sb,
4104                               dma_addr_t mapping, int sb_id)
4105 {
4106         int port = BP_PORT(bp);
4107         int func = BP_FUNC(bp);
4108         int index, val, reg_offset;
4109         u64 section;
4110
4111         /* ATTN */
4112         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4113                                             atten_status_block);
4114         def_sb->atten_status_block.status_block_id = sb_id;
4115
4116         bp->attn_state = 0;
4117
4118         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4119                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4120
4121         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4122                 bp->attn_group[index].sig[0] = REG_RD(bp,
4123                                                      reg_offset + 0x10*index);
4124                 bp->attn_group[index].sig[1] = REG_RD(bp,
4125                                                reg_offset + 0x4 + 0x10*index);
4126                 bp->attn_group[index].sig[2] = REG_RD(bp,
4127                                                reg_offset + 0x8 + 0x10*index);
4128                 bp->attn_group[index].sig[3] = REG_RD(bp,
4129                                                reg_offset + 0xc + 0x10*index);
4130         }
4131
4132         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4133                              HC_REG_ATTN_MSG0_ADDR_L);
4134
4135         REG_WR(bp, reg_offset, U64_LO(section));
4136         REG_WR(bp, reg_offset + 4, U64_HI(section));
4137
4138         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4139
4140         val = REG_RD(bp, reg_offset);
4141         val |= sb_id;
4142         REG_WR(bp, reg_offset, val);
4143
4144         /* USTORM */
4145         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4146                                             u_def_status_block);
4147         def_sb->u_def_status_block.status_block_id = sb_id;
4148
4149         REG_WR(bp, BAR_USTRORM_INTMEM +
4150                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4151         REG_WR(bp, BAR_USTRORM_INTMEM +
4152                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4153                U64_HI(section));
4154         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4155                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4156
4157         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4158                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4159                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4160
4161         /* CSTORM */
4162         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4163                                             c_def_status_block);
4164         def_sb->c_def_status_block.status_block_id = sb_id;
4165
4166         REG_WR(bp, BAR_CSTRORM_INTMEM +
4167                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4168         REG_WR(bp, BAR_CSTRORM_INTMEM +
4169                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4170                U64_HI(section));
4171         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4172                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4173
4174         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4175                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4176                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4177
4178         /* TSTORM */
4179         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4180                                             t_def_status_block);
4181         def_sb->t_def_status_block.status_block_id = sb_id;
4182
4183         REG_WR(bp, BAR_TSTRORM_INTMEM +
4184                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4185         REG_WR(bp, BAR_TSTRORM_INTMEM +
4186                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4187                U64_HI(section));
4188         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4189                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4190
4191         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4192                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4193                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4194
4195         /* XSTORM */
4196         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4197                                             x_def_status_block);
4198         def_sb->x_def_status_block.status_block_id = sb_id;
4199
4200         REG_WR(bp, BAR_XSTRORM_INTMEM +
4201                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4202         REG_WR(bp, BAR_XSTRORM_INTMEM +
4203                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4204                U64_HI(section));
4205         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4206                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4207
4208         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4209                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4210                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4211
4212         bp->stats_pending = 0;
4213         bp->set_mac_pending = 0;
4214
4215         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4216 }
4217
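/* Program the HC timeout for the Rx and Tx completion indices of
 * every queue.  The division by 12 suggests the HC counts in
 * 12-usec ticks; a tick value of 0 disables coalescing for that
 * index via the matching HC_DISABLE flag.
 */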
4218 static void bnx2x_update_coalesce(struct bnx2x *bp)
4219 {
4220         int port = BP_PORT(bp);
4221         int i;
4222
4223         for_each_queue(bp, i) {
4224                 int sb_id = bp->fp[i].sb_id;
4225
4226                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4227                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4228                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4229                                                     U_SB_ETH_RX_CQ_INDEX),
4230                         bp->rx_ticks/12);
4231                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4232                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4233                                                      U_SB_ETH_RX_CQ_INDEX),
4234                          bp->rx_ticks ? 0 : 1);
4235                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4236                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4237                                                      U_SB_ETH_RX_BD_INDEX),
4238                          bp->rx_ticks ? 0 : 1);
4239
4240                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4241                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4242                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4243                                                     C_SB_ETH_TX_CQ_INDEX),
4244                         bp->tx_ticks/12);
4245                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4246                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4247                                                      C_SB_ETH_TX_CQ_INDEX),
4248                          bp->tx_ticks ? 0 : 1);
4249         }
4250 }
4251
4252 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4253                                        struct bnx2x_fastpath *fp, int last)
4254 {
4255         int i;
4256
4257         for (i = 0; i < last; i++) {
4258                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4259                 struct sk_buff *skb = rx_buf->skb;
4260
4261                 if (skb == NULL) {
4262                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4263                         continue;
4264                 }
4265
4266                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4267                         pci_unmap_single(bp->pdev,
4268                                          pci_unmap_addr(rx_buf, mapping),
4269                                          bp->rx_buf_size,
4270                                          PCI_DMA_FROMDEVICE);
4271
4272                 dev_kfree_skb(skb);
4273                 rx_buf->skb = NULL;
4274         }
4275 }
4276
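/* Build the Rx rings of every queue: size the Rx buffers for the
 * MTU plus offset/overhead/alignment, pre-allocate the TPA (LRO)
 * skb pool when TPA is enabled, chain the SGE, BD and CQE ring
 * pages through their reserved "next page" elements, then fill
 * the rings and publish the initial producers to the chip.  An
 * allocation failure only disables TPA on the affected queue
 * instead of failing the whole init.
 */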
4277 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4278 {
4279         int func = BP_FUNC(bp);
4280         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4281                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4282         u16 ring_prod, cqe_ring_prod;
4283         int i, j;
4284
4285         bp->rx_buf_size = bp->dev->mtu;
4286         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4287                 BCM_RX_ETH_PAYLOAD_ALIGN;
4288
4289         if (bp->flags & TPA_ENABLE_FLAG) {
4290                 DP(NETIF_MSG_IFUP,
4291                    "rx_buf_size %d  effective_mtu %d\n",
4292                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4293
4294                 for_each_queue(bp, j) {
4295                         struct bnx2x_fastpath *fp = &bp->fp[j];
4296
4297                         for (i = 0; i < max_agg_queues; i++) {
4298                                 fp->tpa_pool[i].skb =
4299                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4300                                 if (!fp->tpa_pool[i].skb) {
4301                                         BNX2X_ERR("Failed to allocate TPA "
4302                                                   "skb pool for queue[%d] - "
4303                                                   "disabling TPA on this "
4304                                                   "queue!\n", j);
4305                                         bnx2x_free_tpa_pool(bp, fp, i);
4306                                         fp->disable_tpa = 1;
4307                                         break;
4308                                 }
4309                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4310                                                    mapping, 0);
4312                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4313                         }
4314                 }
4315         }
4316
4317         for_each_queue(bp, j) {
4318                 struct bnx2x_fastpath *fp = &bp->fp[j];
4319
4320                 fp->rx_bd_cons = 0;
4321                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4322                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4323
4324                 /* "next page" elements initialization */
4325                 /* SGE ring */
4326                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4327                         struct eth_rx_sge *sge;
4328
4329                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4330                         sge->addr_hi =
4331                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4332                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4333                         sge->addr_lo =
4334                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4335                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4336                 }
4337
4338                 bnx2x_init_sge_ring_bit_mask(fp);
4339
4340                 /* RX BD ring */
4341                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4342                         struct eth_rx_bd *rx_bd;
4343
4344                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4345                         rx_bd->addr_hi =
4346                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4347                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4348                         rx_bd->addr_lo =
4349                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4350                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4351                 }
4352
4353                 /* CQ ring */
4354                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4355                         struct eth_rx_cqe_next_page *nextpg;
4356
4357                         nextpg = (struct eth_rx_cqe_next_page *)
4358                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4359                         nextpg->addr_hi =
4360                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4361                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4362                         nextpg->addr_lo =
4363                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4364                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4365                 }
4366
4367                 /* Allocate SGEs and initialize the ring elements */
4368                 for (i = 0, ring_prod = 0;
4369                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4370
4371                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4372                                 BNX2X_ERR("was only able to allocate "
4373                                           "%d rx sges\n", i);
4374                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4375                                 /* Cleanup already allocated elements */
4376                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4377                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4378                                 fp->disable_tpa = 1;
4379                                 ring_prod = 0;
4380                                 break;
4381                         }
4382                         ring_prod = NEXT_SGE_IDX(ring_prod);
4383                 }
4384                 fp->rx_sge_prod = ring_prod;
4385
4386                 /* Allocate BDs and initialize BD ring */
4387                 fp->rx_comp_cons = 0;
4388                 cqe_ring_prod = ring_prod = 0;
4389                 for (i = 0; i < bp->rx_ring_size; i++) {
4390                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4391                                 BNX2X_ERR("was only able to allocate "
4392                                           "%d rx skbs\n", i);
4393                                 bp->eth_stats.rx_skb_alloc_failed++;
4394                                 break;
4395                         }
4396                         ring_prod = NEXT_RX_IDX(ring_prod);
4397                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4398                         WARN_ON(ring_prod <= i);
4399                 }
4400
4401                 fp->rx_bd_prod = ring_prod;
4402                 /* must not have more available CQEs than BDs */
4403                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4404                                        cqe_ring_prod);
4405                 fp->rx_pkt = fp->rx_calls = 0;
4406
4407                 /* Warning!
4408                  * This will generate an interrupt (to the TSTORM);
4409                  * it must only be done after the chip is initialized.
4410                  */
4411                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4412                                      fp->rx_sge_prod);
4413                 if (j != 0)
4414                         continue;
4415
4416                 REG_WR(bp, BAR_USTRORM_INTMEM +
4417                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4418                        U64_LO(fp->rx_comp_mapping));
4419                 REG_WR(bp, BAR_USTRORM_INTMEM +
4420                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4421                        U64_HI(fp->rx_comp_mapping));
4422         }
4423 }
4424
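/* Chain the Tx BD ring pages: the last BD of each page points to
 * the first BD of the following page (wrapping from the last page
 * back to the first), then reset all producer/consumer indices.
 */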
4425 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4426 {
4427         int i, j;
4428
4429         for_each_queue(bp, j) {
4430                 struct bnx2x_fastpath *fp = &bp->fp[j];
4431
4432                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4433                         struct eth_tx_bd *tx_bd =
4434                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4435
4436                         tx_bd->addr_hi =
4437                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4438                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4439                         tx_bd->addr_lo =
4440                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4441                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4442                 }
4443
4444                 fp->tx_pkt_prod = 0;
4445                 fp->tx_pkt_cons = 0;
4446                 fp->tx_bd_prod = 0;
4447                 fp->tx_bd_cons = 0;
4448                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4449                 fp->tx_pkt = 0;
4450         }
4451 }
4452
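/* Initialize the slowpath (SPQ) ring: reset the producer, grant
 * the driver MAX_SPQ_PENDING credits and tell the XSTORM where
 * the ring lives and where its producer starts.
 */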
4453 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4454 {
4455         int func = BP_FUNC(bp);
4456
4457         spin_lock_init(&bp->spq_lock);
4458
4459         bp->spq_left = MAX_SPQ_PENDING;
4460         bp->spq_prod_idx = 0;
4461         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4462         bp->spq_prod_bd = bp->spq;
4463         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4464
4465         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4466                U64_LO(bp->spq_mapping));
4467         REG_WR(bp,
4468                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4469                U64_HI(bp->spq_mapping));
4470
4471         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4472                bp->spq_prod_idx);
4473 }
4474
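/* Fill the per-connection ETH context of every queue: the XSTORM
 * section gets the Tx BD ring and doorbell data addresses, the
 * USTORM section the Rx BD (and, with TPA, SGE) ring addresses
 * and buffer sizes, and the CDU reserved fields encode the
 * connection ID, presumably so the CDU can validate the context.
 */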
4475 static void bnx2x_init_context(struct bnx2x *bp)
4476 {
4477         int i;
4478
4479         for_each_queue(bp, i) {
4480                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4481                 struct bnx2x_fastpath *fp = &bp->fp[i];
4482                 u8 sb_id = FP_SB_ID(fp);
4483
4484                 context->xstorm_st_context.tx_bd_page_base_hi =
4485                                                 U64_HI(fp->tx_desc_mapping);
4486                 context->xstorm_st_context.tx_bd_page_base_lo =
4487                                                 U64_LO(fp->tx_desc_mapping);
4488                 context->xstorm_st_context.db_data_addr_hi =
4489                                                 U64_HI(fp->tx_prods_mapping);
4490                 context->xstorm_st_context.db_data_addr_lo =
4491                                                 U64_LO(fp->tx_prods_mapping);
4492                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4493                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4494
4495                 context->ustorm_st_context.common.sb_index_numbers =
4496                                                 BNX2X_RX_SB_INDEX_NUM;
4497                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4498                 context->ustorm_st_context.common.status_block_id = sb_id;
4499                 context->ustorm_st_context.common.flags =
4500                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4501                 context->ustorm_st_context.common.mc_alignment_size =
4502                         BCM_RX_ETH_PAYLOAD_ALIGN;
4503                 context->ustorm_st_context.common.bd_buff_size =
4504                                                 bp->rx_buf_size;
4505                 context->ustorm_st_context.common.bd_page_base_hi =
4506                                                 U64_HI(fp->rx_desc_mapping);
4507                 context->ustorm_st_context.common.bd_page_base_lo =
4508                                                 U64_LO(fp->rx_desc_mapping);
4509                 if (!fp->disable_tpa) {
4510                         context->ustorm_st_context.common.flags |=
4511                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4512                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4513                         context->ustorm_st_context.common.sge_buff_size =
4514                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4515                         context->ustorm_st_context.common.sge_page_base_hi =
4516                                                 U64_HI(fp->rx_sge_mapping);
4517                         context->ustorm_st_context.common.sge_page_base_lo =
4518                                                 U64_LO(fp->rx_sge_mapping);
4519                 }
4520
4521                 context->cstorm_st_context.sb_index_number =
4522                                                 C_SB_ETH_TX_CQ_INDEX;
4523                 context->cstorm_st_context.status_block_id = sb_id;
4524
4525                 context->xstorm_ag_context.cdu_reserved =
4526                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4527                                                CDU_REGION_NUMBER_XCM_AG,
4528                                                ETH_CONNECTION_TYPE);
4529                 context->ustorm_ag_context.cdu_usage =
4530                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4531                                                CDU_REGION_NUMBER_UCM_AG,
4532                                                ETH_CONNECTION_TYPE);
4533         }
4534 }
4535
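/* RSS indirection table (multi-queue only): each of its entries
 * maps an RSS hash result to a client ID, spreading the results
 * round-robin over the active queues.
 */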
4536 static void bnx2x_init_ind_table(struct bnx2x *bp)
4537 {
4538         int func = BP_FUNC(bp);
4539         int i;
4540
4541         if (!is_multi(bp))
4542                 return;
4543
4544         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4545         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4546                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4547                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4548                         BP_CL_ID(bp) + (i % bp->num_queues));
4549 }
4550
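/* Push the tstorm per-client configuration (MTU, statistics
 * counter ID, optional VLAN stripping and SGE ring usage) to all
 * queues.  With TPA the worst case number of SGEs per packet is
 * derived from the MTU; e.g. assuming 4K SGE pages and 2 pages
 * per SGE, a 9000-byte MTU spans 3 pages, rounded up to 4, which
 * is 2 SGEs.
 */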
4551 static void bnx2x_set_client_config(struct bnx2x *bp)
4552 {
4553         struct tstorm_eth_client_config tstorm_client = {0};
4554         int port = BP_PORT(bp);
4555         int i;
4556
4557         tstorm_client.mtu = bp->dev->mtu;
4558         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4559         tstorm_client.config_flags =
4560                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4561 #ifdef BCM_VLAN
4562         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4563                 tstorm_client.config_flags |=
4564                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4565                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4566         }
4567 #endif
4568
4569         if (bp->flags & TPA_ENABLE_FLAG) {
4570                 tstorm_client.max_sges_for_packet =
4571                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4572                 tstorm_client.max_sges_for_packet =
4573                         ((tstorm_client.max_sges_for_packet +
4574                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4575                         PAGES_PER_SGE_SHIFT;
4576
4577                 tstorm_client.config_flags |=
4578                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4579         }
4580
4581         for_each_queue(bp, i) {
4582                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4583                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4584                        ((u32 *)&tstorm_client)[0]);
4585                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4586                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4587                        ((u32 *)&tstorm_client)[1]);
4588         }
4589
4590         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4591            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4592 }
4593
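/* Translate the driver Rx mode into the tstorm MAC filter masks.
 * Each bit in the drop/accept fields stands for one client, so
 * only this function's bit (1 << BP_L_ID) is set per filter
 * class: NONE drops everything, PROMISC accepts everything, and
 * NORMAL only accepts broadcast here, presumably leaving unicast
 * and multicast filtering to the MAC CAM.
 */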
4594 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4595 {
4596         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4597         int mode = bp->rx_mode;
4598         int mask = (1 << BP_L_ID(bp));
4599         int func = BP_FUNC(bp);
4600         int i;
4601
4602         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4603
4604         switch (mode) {
4605         case BNX2X_RX_MODE_NONE: /* no Rx */
4606                 tstorm_mac_filter.ucast_drop_all = mask;
4607                 tstorm_mac_filter.mcast_drop_all = mask;
4608                 tstorm_mac_filter.bcast_drop_all = mask;
4609                 break;
4610         case BNX2X_RX_MODE_NORMAL:
4611                 tstorm_mac_filter.bcast_accept_all = mask;
4612                 break;
4613         case BNX2X_RX_MODE_ALLMULTI:
4614                 tstorm_mac_filter.mcast_accept_all = mask;
4615                 tstorm_mac_filter.bcast_accept_all = mask;
4616                 break;
4617         case BNX2X_RX_MODE_PROMISC:
4618                 tstorm_mac_filter.ucast_accept_all = mask;
4619                 tstorm_mac_filter.mcast_accept_all = mask;
4620                 tstorm_mac_filter.bcast_accept_all = mask;
4621                 break;
4622         default:
4623                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4624                 break;
4625         }
4626
4627         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4628                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4629                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4630                        ((u32 *)&tstorm_mac_filter)[i]);
4631
4632 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4633                    ((u32 *)&tstorm_mac_filter)[i]); */
4634         }
4635
4636         if (mode != BNX2X_RX_MODE_NONE)
4637                 bnx2x_set_client_config(bp);
4638 }
4639
4640 static void bnx2x_init_internal_common(struct bnx2x *bp)
4641 {
4642         int i;
4643
4644         if (bp->flags & TPA_ENABLE_FLAG) {
4645                 struct tstorm_eth_tpa_exist tpa = {0};
4646
4647                 tpa.tpa_exist = 1;
4648
4649                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4650                        ((u32 *)&tpa)[0]);
4651                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4652                        ((u32 *)&tpa)[1]);
4653         }
4654
4655         /* Zero this manually as its initialization is
4656            currently missing in the initTool */
4657         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4658                 REG_WR(bp, BAR_USTRORM_INTMEM +
4659                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4660 }
4661
4662 static void bnx2x_init_internal_port(struct bnx2x *bp)
4663 {
4664         int port = BP_PORT(bp);
4665
4666         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4667         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4668         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4669         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4670 }
4671
4672 static void bnx2x_init_internal_func(struct bnx2x *bp)
4673 {
4674         struct tstorm_eth_function_common_config tstorm_config = {0};
4675         struct stats_indication_flags stats_flags = {0};
4676         int port = BP_PORT(bp);
4677         int func = BP_FUNC(bp);
4678         int i;
4679         u16 max_agg_size;
4680
4681         if (is_multi(bp)) {
4682                 tstorm_config.config_flags = MULTI_FLAGS;
4683                 tstorm_config.rss_result_mask = MULTI_MASK;
4684         }
4685
4686         tstorm_config.leading_client_id = BP_L_ID(bp);
4687
4688         REG_WR(bp, BAR_TSTRORM_INTMEM +
4689                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4690                (*(u32 *)&tstorm_config));
4691
4692         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4693         bnx2x_set_storm_rx_mode(bp);
4694
4695         /* reset xstorm per client statistics */
4696         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4697                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4698                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4699                        i*4, 0);
4700         }
4701         /* reset tstorm per client statistics */
4702         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4703                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4704                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4705                        i*4, 0);
4706         }
4707
4708         /* Init statistics related context */
4709         stats_flags.collect_eth = 1;
4710
4711         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4712                ((u32 *)&stats_flags)[0]);
4713         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4714                ((u32 *)&stats_flags)[1]);
4715
4716         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4717                ((u32 *)&stats_flags)[0]);
4718         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4719                ((u32 *)&stats_flags)[1]);
4720
4721         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4722                ((u32 *)&stats_flags)[0]);
4723         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4724                ((u32 *)&stats_flags)[1]);
4725
4726         REG_WR(bp, BAR_XSTRORM_INTMEM +
4727                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4728                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4729         REG_WR(bp, BAR_XSTRORM_INTMEM +
4730                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4731                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4732
4733         REG_WR(bp, BAR_TSTRORM_INTMEM +
4734                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4735                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4736         REG_WR(bp, BAR_TSTRORM_INTMEM +
4737                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4738                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4739
4740         if (CHIP_IS_E1H(bp)) {
4741                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4742                         IS_E1HMF(bp));
4743                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4744                         IS_E1HMF(bp));
4745                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4746                         IS_E1HMF(bp));
4747                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4748                         IS_E1HMF(bp));
4749
4750                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4751                          bp->e1hov);
4752         }
4753
4754         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4755         max_agg_size =
4756                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4757                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4758                     (u32)0xffff);
4759         for_each_queue(bp, i) {
4760                 struct bnx2x_fastpath *fp = &bp->fp[i];
4761
4762                 REG_WR(bp, BAR_USTRORM_INTMEM +
4763                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4764                        U64_LO(fp->rx_comp_mapping));
4765                 REG_WR(bp, BAR_USTRORM_INTMEM +
4766                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4767                        U64_HI(fp->rx_comp_mapping));
4768
4769                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4770                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4771                          max_agg_size);
4772         }
4773 }
4774
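/* Internal RAM init is cumulative: a COMMON load code falls
 * through to the port and function stages, a PORT load code to
 * the function stage, and a FUNCTION load code runs only the
 * per-function stage; hence the deliberately missing breaks.
 */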
4775 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4776 {
4777         switch (load_code) {
4778         case FW_MSG_CODE_DRV_LOAD_COMMON:
4779                 bnx2x_init_internal_common(bp);
4780                 /* no break */
4781
4782         case FW_MSG_CODE_DRV_LOAD_PORT:
4783                 bnx2x_init_internal_port(bp);
4784                 /* no break */
4785
4786         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4787                 bnx2x_init_internal_func(bp);
4788                 break;
4789
4790         default:
4791                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4792                 break;
4793         }
4794 }
4795
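/* Top-level NIC init: set up the per-queue status blocks, then
 * the default status block, coalescing, rings, contexts and
 * internal memories, and enable interrupts only at the very end,
 * after the memory barriers have flushed all prior writes.
 */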
4796 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4797 {
4798         int i;
4799
4800         for_each_queue(bp, i) {
4801                 struct bnx2x_fastpath *fp = &bp->fp[i];
4802
4803                 fp->bp = bp;
4804                 fp->state = BNX2X_FP_STATE_CLOSED;
4805                 fp->index = i;
4806                 fp->cl_id = BP_L_ID(bp) + i;
4807                 fp->sb_id = fp->cl_id;
4808                 DP(NETIF_MSG_IFUP,
4809                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4810                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4811                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4812                               FP_SB_ID(fp));
4813                 bnx2x_update_fpsb_idx(fp);
4814         }
4815
4816         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4817                           DEF_SB_ID);
4818         bnx2x_update_dsb_idx(bp);
4819         bnx2x_update_coalesce(bp);
4820         bnx2x_init_rx_rings(bp);
4821         bnx2x_init_tx_ring(bp);
4822         bnx2x_init_sp_ring(bp);
4823         bnx2x_init_context(bp);
4824         bnx2x_init_internal(bp, load_code);
4825         bnx2x_init_ind_table(bp);
4826         bnx2x_stats_init(bp);
4827
4828         /* At this point, we are ready for interrupts */
4829         atomic_set(&bp->intr_sem, 0);
4830
4831         /* flush all before enabling interrupts */
4832         mb();
4833         mmiowb();
4834
4835         bnx2x_int_enable(bp);
4836 }
4837
4838 /* end of nic init */
4839
4840 /*
4841  * gzip service functions
4842  */
4843
4844 static int bnx2x_gunzip_init(struct bnx2x *bp)
4845 {
4846         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4847                                               &bp->gunzip_mapping);
4848         if (bp->gunzip_buf == NULL)
4849                 goto gunzip_nomem1;
4850
4851         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4852         if (bp->strm == NULL)
4853                 goto gunzip_nomem2;
4854
4855         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4856                                       GFP_KERNEL);
4857         if (bp->strm->workspace == NULL)
4858                 goto gunzip_nomem3;
4859
4860         return 0;
4861
4862 gunzip_nomem3:
4863         kfree(bp->strm);
4864         bp->strm = NULL;
4865
4866 gunzip_nomem2:
4867         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4868                             bp->gunzip_mapping);
4869         bp->gunzip_buf = NULL;
4870
4871 gunzip_nomem1:
4872         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4873                " decompression\n", bp->dev->name);
4874         return -ENOMEM;
4875 }
4876
4877 static void bnx2x_gunzip_end(struct bnx2x *bp)
4878 {
4879         kfree(bp->strm->workspace);
4880
4881         kfree(bp->strm);
4882         bp->strm = NULL;
4883
4884         if (bp->gunzip_buf) {
4885                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4886                                     bp->gunzip_mapping);
4887                 bp->gunzip_buf = NULL;
4888         }
4889 }
4890
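/* Inflate a gzip image into bp->gunzip_buf.  The 10-byte gzip
 * header (magic 0x1f 0x8b, method, flags, mtime, xfl, os) plus an
 * optional NUL-terminated file name (FNAME flag) is skipped by
 * hand, and zlib_inflateInit2() is called with -MAX_WBITS so that
 * zlib expects a raw deflate stream.
 */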
4891 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4892 {
4893         int n, rc;
4894
4895         /* check gzip header */
4896         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4897                 return -EINVAL;
4898
4899         n = 10;
4900
4901 #define FNAME                           0x8
4902
4903         if (zbuf[3] & FNAME)
4904                 while ((zbuf[n++] != 0) && (n < len));
4905
4906         bp->strm->next_in = zbuf + n;
4907         bp->strm->avail_in = len - n;
4908         bp->strm->next_out = bp->gunzip_buf;
4909         bp->strm->avail_out = FW_BUF_SIZE;
4910
4911         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4912         if (rc != Z_OK)
4913                 return rc;
4914
4915         rc = zlib_inflate(bp->strm, Z_FINISH);
4916         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4917                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4918                        bp->dev->name, bp->strm->msg);
4919
4920         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4921         if (bp->gunzip_outlen & 0x3)
4922                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4923                                     " gunzip_outlen (%d) not aligned\n",
4924                        bp->dev->name, bp->gunzip_outlen);
4925         bp->gunzip_outlen >>= 2;
4926
4927         zlib_inflateEnd(bp->strm);
4928
4929         if (rc == Z_STREAM_END)
4930                 return 0;
4931
4932         return rc;
4933 }
4934
4935 /* nic load/unload */
4936
4937 /*
4938  * General service functions
4939  */
4940
4941 /* send a NIG loopback debug packet */
4942 static void bnx2x_lb_pckt(struct bnx2x *bp)
4943 {
4944         u32 wb_write[3];
4945
4946         /* Ethernet source and destination addresses */
4947         wb_write[0] = 0x55555555;
4948         wb_write[1] = 0x55555555;
4949         wb_write[2] = 0x20;             /* SOP */
4950         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4951
4952         /* NON-IP protocol */
4953         wb_write[0] = 0x09000000;
4954         wb_write[1] = 0x55555555;
4955         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4956         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4957 }
4958
4959 /* some of the internal memories are not directly readable
4960  * from the driver; to test them we send debug packets
4961  * through the NIG loopback and check the block counters
4962  */
4963 static int bnx2x_int_mem_test(struct bnx2x *bp)
4964 {
4965         int factor;
4966         int count, i;
4967         u32 val = 0;
4968
4969         if (CHIP_REV_IS_FPGA(bp))
4970                 factor = 120;
4971         else if (CHIP_REV_IS_EMUL(bp))
4972                 factor = 200;
4973         else
4974                 factor = 1;
4975
4976         DP(NETIF_MSG_HW, "start part1\n");
4977
4978         /* Disable inputs of parser neighbor blocks */
4979         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4980         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4981         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4982         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4983
4984         /*  Write 0 to parser credits for CFC search request */
4985         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4986
4987         /* send Ethernet packet */
4988         bnx2x_lb_pckt(bp);
4989
4990         /* TODO: do we need to reset the NIG statistic? */
4991         /* Wait until NIG register shows 1 packet of size 0x10 */
4992         count = 1000 * factor;
4993         while (count) {
4994
4995                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996                 val = *bnx2x_sp(bp, wb_data[0]);
4997                 if (val == 0x10)
4998                         break;
4999
5000                 msleep(10);
5001                 count--;
5002         }
5003         if (val != 0x10) {
5004                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5005                 return -1;
5006         }
5007
5008         /* Wait until PRS register shows 1 packet */
5009         count = 1000 * factor;
5010         while (count) {
5011                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5012                 if (val == 1)
5013                         break;
5014
5015                 msleep(10);
5016                 count--;
5017         }
5018         if (val != 0x1) {
5019                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5020                 return -2;
5021         }
5022
5023         /* Reset and init BRB, PRS */
5024         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5025         msleep(50);
5026         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5027         msleep(50);
5028         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5029         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5030
5031         DP(NETIF_MSG_HW, "part2\n");
5032
5033         /* Disable inputs of parser neighbor blocks */
5034         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5035         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5036         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5037         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5038
5039         /* Write 0 to parser credits for CFC search request */
5040         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5041
5042         /* send 10 Ethernet packets */
5043         for (i = 0; i < 10; i++)
5044                 bnx2x_lb_pckt(bp);
5045
5046         /* Wait until NIG register shows the 10 + 1 packets,
5047            a total of 11*0x10 = 0xb0 bytes */
5048         count = 1000 * factor;
5049         while (count) {
5050
5051                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5052                 val = *bnx2x_sp(bp, wb_data[0]);
5053                 if (val == 0xb0)
5054                         break;
5055
5056                 msleep(10);
5057                 count--;
5058         }
5059         if (val != 0xb0) {
5060                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5061                 return -3;
5062         }
5063
5064         /* Wait until PRS register shows 2 packets */
5065         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5066         if (val != 2)
5067                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5068
5069         /* Write 1 to parser credits for CFC search request */
5070         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5071
5072         /* Wait until PRS register shows 3 packets */
5073         msleep(10 * factor);
5074         /* and check that the PRS packet counter reached 3 */
5075         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5076         if (val != 3)
5077                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5078
5079         /* clear NIG EOP FIFO */
5080         for (i = 0; i < 11; i++)
5081                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5082         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5083         if (val != 1) {
5084                 BNX2X_ERR("clear of NIG failed\n");
5085                 return -4;
5086         }
5087
5088         /* Reset and init BRB, PRS, NIG */
5089         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5090         msleep(50);
5091         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5092         msleep(50);
5093         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5094         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5095 #ifndef BCM_ISCSI
5096         /* set NIC mode */
5097         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5098 #endif
5099
5100         /* Enable inputs of parser neighbor blocks */
5101         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5102         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5103         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5104         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5105
5106         DP(NETIF_MSG_HW, "done\n");
5107
5108         return 0; /* OK */
5109 }
5110
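/* Unmask the attention sources of the HW blocks; writing 0 to a
 * mask register unmasks all its sources, while PBF keeps bits 3
 * and 4 masked and PXP2 uses a chip-rev dependent mask.
 */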
5111 static void enable_blocks_attention(struct bnx2x *bp)
5112 {
5113         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5114         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5115         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5116         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5117         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5118         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5119         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5120         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5121         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5122 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5123 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5124         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5125         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5126         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5127 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5128 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5129         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5130         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5131         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5132         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5133 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5134 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5135         if (CHIP_REV_IS_FPGA(bp))
5136                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5137         else
5138                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5139         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5140         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5141         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5142 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5143 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5144         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5145         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5146 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5147         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5148 }
5149
5150
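/* One-time per-chip init, done by the first function to load:
 * take the blocks out of reset, bring up PXP/DMAE, zero the storm
 * internal memories, init all common blocks in order and, on E1
 * after a power-on (NIG counter still zero), run the internal
 * memory self test.
 */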
5151 static int bnx2x_init_common(struct bnx2x *bp)
5152 {
5153         u32 val, i;
5154
5155         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5156
5157         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5158         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5159
5160         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5161         if (CHIP_IS_E1H(bp))
5162                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5163
5164         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5165         msleep(30);
5166         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5167
5168         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5169         if (CHIP_IS_E1(bp)) {
5170                 /* enable HW interrupt from PXP on USDM overflow
5171                    bit 16 on INT_MASK_0 */
5172                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5173         }
5174
5175         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5176         bnx2x_init_pxp(bp);
5177
5178 #ifdef __BIG_ENDIAN
5179         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5180         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5181         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5182         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5183         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5184
5185 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5186         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5187         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5188         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5189         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5190 #endif
5191
5192         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5193 #ifdef BCM_ISCSI
5194         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5195         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5196         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5197 #endif
5198
5199         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5200                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5201
5202         /* let the HW do its magic ... */
5203         msleep(100);
5204         /* finish PXP init */
5205         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5206         if (val != 1) {
5207                 BNX2X_ERR("PXP2 CFG failed\n");
5208                 return -EBUSY;
5209         }
5210         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5211         if (val != 1) {
5212                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5213                 return -EBUSY;
5214         }
5215
5216         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5217         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5218
5219         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5220
5221         /* clean the DMAE memory */
5222         bp->dmae_ready = 1;
5223         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5224
5225         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5226         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5227         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5228         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5229
5230         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5231         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5232         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5233         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5234
5235         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5236         /* soft reset pulse */
5237         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5238         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5239
5240 #ifdef BCM_ISCSI
        bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

        bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
        REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
        if (!CHIP_REV_IS_SLOW(bp)) {
                /* enable hw interrupt from doorbell Q */
                REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
        }

        bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
        if (CHIP_REV_IS_SLOW(bp)) {
                /* no-pause workaround for emulation and FPGA */
                REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
                REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
                REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
                REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
        }

        bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
        REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
        /* set NIC mode */
        REG_WR(bp, PRS_REG_NIC_MODE, 1);
        if (CHIP_IS_E1H(bp))
                REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

        bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
        bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
        bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
        bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

        if (CHIP_IS_E1H(bp)) {
                bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp,
                                TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
                                0, STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp,
                                CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
                                0, STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp,
                                XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
                                0, STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp,
                                USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
                                0, STORM_INTMEM_SIZE_E1H/2);
        } else { /* E1 */
                bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1);
                bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1);
                bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1);
                bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1);
        }

        bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
        bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
        bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
        bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

        /* sync semi rtc */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
               0x80000000);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
               0x80000000);

        bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
        bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
        bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

        REG_WR(bp, SRC_REG_SOFT_RST, 1);
        for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
                REG_WR(bp, i, 0xc0cac01a);
                /* TODO: replace with something meaningful */
        }
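#if 0
        /* Illustrative alternative for the placeholder RSS key above -
         * a sketch only, not what this driver actually does: seed the
         * ten KEYRSS registers from the kernel RNG instead of a fixed
         * constant (assumes <linux/random.h> is available here).
         */
        for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
                u32 rss_key;

                get_random_bytes(&rss_key, sizeof(rss_key));
                REG_WR(bp, i, rss_key);
        }
#endif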
        if (CHIP_IS_E1H(bp))
                bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
        REG_WR(bp, SRC_REG_SOFT_RST, 0);

        if (sizeof(union cdu_context) != 1024)
                /* we currently assume that a context is 1024 bytes */
                printk(KERN_ALERT PFX "please adjust the size of"
                       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

        bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
        val = (4 << 24) + (0 << 12) + 1024;
        REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
        if (CHIP_IS_E1(bp)) {
                /* workaround: fix the PXP client credit until the
                   Excel-generated init values are updated */
                REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
                REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
        }

        bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
        REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

        bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
        bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

        /* PXPCS COMMON comes here */
        /* Reset PCIE errors for debug */
        REG_WR(bp, 0x2814, 0xffffffff);
        REG_WR(bp, 0x3820, 0xffffffff);

        /* EMAC0 COMMON comes here */
        /* EMAC1 COMMON comes here */
        /* DBU COMMON comes here */
        /* DBG COMMON comes here */

        bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
        if (CHIP_IS_E1H(bp)) {
                REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
                REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
        }

        if (CHIP_REV_IS_SLOW(bp))
                msleep(200);

        /* finish CFC init */
        val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
        if (val != 1) {
                BNX2X_ERR("CFC LL_INIT failed\n");
                return -EBUSY;
        }
        val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
        if (val != 1) {
                BNX2X_ERR("CFC AC_INIT failed\n");
                return -EBUSY;
        }
        val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
        if (val != 1) {
                BNX2X_ERR("CFC CAM_INIT failed\n");
                return -EBUSY;
        }
        REG_WR(bp, CFC_REG_DEBUG0, 0);
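        /* For reference: reg_poll() (defined earlier in this file)
         * re-reads the register until it holds the expected value or the
         * retry budget runs out, returning the last value read - so
         * "val != 1" above means the block never reported init done.
         */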

        /* read NIG statistic to see if this is our
           first init since power-up */
        bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *bnx2x_sp(bp, wb_data[0]);

        /* do internal memory self test */
        if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
                BNX2X_ERR("internal mem self test failed\n");
                return -EBUSY;
        }

        switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
        case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
        case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
                /* Fan failure is indicated by SPIO 5 */
                bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
                               MISC_REGISTERS_SPIO_INPUT_HI_Z);

                /* set to active low mode */
                val = REG_RD(bp, MISC_REG_SPIO_INT);
                val |= ((1 << MISC_REGISTERS_SPIO_5) <<
                                        MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
                REG_WR(bp, MISC_REG_SPIO_INT, val);

                /* enable interrupt to signal the IGU */
                val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
                val |= (1 << MISC_REGISTERS_SPIO_5);
                REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
                break;

        default:
                break;
        }

        /* clear PXP2 attentions */
        REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

        enable_blocks_attention(bp);

        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_common_init_phy(bp, bp->common.shmem_base);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

        return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 val;

        DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

        REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

        /* Port PXP comes here */
        /* Port PXP2 comes here */
#ifdef BCM_ISCSI
        /* Port0  1
         * Port1  385 */
        i++;
        wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
        wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
        REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
        REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

        /* Port0  2
         * Port1  386 */
        i++;
        wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
        wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
        REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
        REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

        /* Port0  3
         * Port1  387 */
        i++;
        wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
        wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
        REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
        REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
        /* Port CMs come here */

        /* Port QM comes here */
#ifdef BCM_ISCSI
        REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
        REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

        bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
                             func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
        /* Port DQ comes here */
        /* Port BRB1 comes here */
        /* Port PRS comes here */
        /* Port TSDM comes here */
        /* Port CSDM comes here */
        /* Port USDM comes here */
        /* Port XSDM comes here */
        bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
                             port ? TSEM_PORT1_END : TSEM_PORT0_END);
        bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
                             port ? USEM_PORT1_END : USEM_PORT0_END);
        bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
                             port ? CSEM_PORT1_END : CSEM_PORT0_END);
        bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
                             port ? XSEM_PORT1_END : XSEM_PORT0_END);
        /* Port UPB comes here */
        /* Port XPB comes here */

        bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
                             port ? PBF_PORT1_END : PBF_PORT0_END);

        /* configure PBF to work without PAUSE for a 9000-byte MTU */
        REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

        /* update threshold */
        REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
        /* update init credit */
        REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
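        /* Note: the two PBF values above are in 16-byte units; 9040/16
         * = 565 lines covers the 9000-byte MTU plus framing overhead.
         * The extra "+ 553 - 22" credit presumably absorbs data already
         * in flight in the pipeline (an assumption - these constants
         * are hardware-supplied magic numbers).
         */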

        /* probe changes */
        REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
        msleep(5);
        REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
        /* tell the searcher where the T2 table is */
        REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

        wb_write[0] = U64_LO(bp->t2_mapping);
        wb_write[1] = U64_HI(bp->t2_mapping);
        REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
        wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
        wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
        REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

        REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
        /* Port SRCH comes here */
#endif
        /* Port CDU comes here */
        /* Port CFC comes here */

        if (CHIP_IS_E1(bp)) {
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
                             port ? HC_PORT1_END : HC_PORT0_END);

        bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
                                    MISC_AEU_PORT0_START,
                             port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
        /* init aeu_mask_attn_func_0/1:
         *  - SF mode: bits 3-7 are masked, only bits 0-2 are in use
         *  - MF mode: bit 3 is masked, bits 0-2 are in use as in SF,
         *             bits 4-7 are used for "per vn group attention" */
        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
               (IS_E1HMF(bp) ? 0xF7 : 0x7));

        /* Port PXPCS comes here */
        /* Port EMAC0 comes here */
        /* Port EMAC1 comes here */
        /* Port DBU comes here */
        /* Port DBG comes here */
        bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
                             port ? NIG_PORT1_END : NIG_PORT0_END);

        REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

        if (CHIP_IS_E1H(bp)) {
                u32 wsum;
                struct cmng_struct_per_port m_cmng_port;
                int vn;

                /* 0x2 disable e1hov, 0x1 enable */
                REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
                       (IS_E1HMF(bp) ? 0x1 : 0x2));

                /* Init RATE SHAPING and FAIRNESS contexts.
                   Initialize as if there is a 10G link. */
                wsum = bnx2x_calc_vn_wsum(bp);
                bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
                if (IS_E1HMF(bp))
                        for (vn = VN_0; vn < E1HVN_MAX; vn++)
                                bnx2x_init_vn_minmax(bp, 2*vn + port,
                                        wsum, 10000, &m_cmng_port);
        }

        /* Port MCP comes here */
        /* Port DMAE comes here */

        switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
        case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
        case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
                /* add SPIO 5 to group 0 */
                val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
                val |= AEU_INPUTS_ATTN_BITS_SPIO5;
                REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
                break;

        default:
                break;
        }

        bnx2x__link_reset(bp);

        return 0;
}

#define ILT_PER_FUNC            (768/2)
#define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
/* The physical address is shifted right 12 bits, and a "valid" bit is
 * set at bit 52 (bit 20 of the high dword).  Since this is a wide
 * register(TM) we split it into two 32-bit writes.
 */
#define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)          (((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)

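/* Worked example for the macros above (illustrative address): for
 * addr = 0x1234_5678_9000,
 *   ONCHIP_ADDR1(addr) = 0x23456789    (addr >> 12, low 32 bits)
 *   ONCHIP_ADDR2(addr) = 0x00100001    (valid bit 20 | addr >> 44)
 * and PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405, i.e. first == last == 5,
 * describing a single-line ILT range.
 */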
#define CNIC_ILT_LINES          0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
        int reg;

        if (CHIP_IS_E1H(bp))
                reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
        else /* E1 */
                reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

        bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int i;

        DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

        i = FUNC_ILT_BASE(func);

        bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
        if (CHIP_IS_E1H(bp)) {
                REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
                REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
        } else /* E1 */
                REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
                       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

        if (CHIP_IS_E1H(bp)) {
                for (i = 0; i < 9; i++)
                        bnx2x_init_block(bp,
                                         cm_start[func][i], cm_end[func][i]);

                REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
                REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
        }

        /* HC init per function */
        if (CHIP_IS_E1H(bp)) {
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

        if (CHIP_IS_E1H(bp))
                REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

        /* Reset PCIE errors for debug */
        REG_WR(bp, 0x2114, 0xffffffff);
        REG_WR(bp, 0x2120, 0xffffffff);

        return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
        int i, rc = 0;

        DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
           BP_FUNC(bp), load_code);

        bp->dmae_ready = 0;
        mutex_init(&bp->dmae_mutex);
        bnx2x_gunzip_init(bp);

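        /* The load_code cases form a strict hierarchy: a COMMON load
         * must also perform the PORT and FUNCTION init, and a PORT load
         * must also perform the FUNCTION init, hence the deliberate
         * fall-through between the cases below.
         */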
        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_COMMON:
                rc = bnx2x_init_common(bp);
                if (rc)
                        goto init_hw_err;
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_PORT:
                bp->dmae_ready = 1;
                rc = bnx2x_init_port(bp);
                if (rc)
                        goto init_hw_err;
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                bp->dmae_ready = 1;
                rc = bnx2x_init_func(bp);
                if (rc)
                        goto init_hw_err;
                break;

        default:
                BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
                break;
        }

        if (!BP_NOMCP(bp)) {
                int func = BP_FUNC(bp);

                bp->fw_drv_pulse_wr_seq =
                                (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
                                 DRV_PULSE_SEQ_MASK);
                bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
                DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
                   bp->fw_drv_pulse_wr_seq, bp->func_stx);
        } else
                bp->func_stx = 0;

        /* this needs to be done before gunzip end */
        bnx2x_zero_def_sb(bp);
        for_each_queue(bp, i)
                bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
        bnx2x_gunzip_end(bp);

        return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
        int func = BP_FUNC(bp);
        u32 seq = ++bp->fw_seq;
        u32 rc = 0;
        u32 cnt = 1;
        u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

        SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
        DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

        do {
                /* let the FW do its magic ... */
                msleep(delay);

                rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

                /* Give the FW up to 2 seconds (200 * 10ms),
                   or 20 seconds on emulation (200 * 100ms) */
        } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

        DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
           cnt*delay, rc, seq);

        /* is this a reply to our command? */
        if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
                rc &= FW_MSG_CODE_MASK;

        } else {
                /* FW BUG! */
                BNX2X_ERR("FW failed to respond!\n");
                bnx2x_fw_dump(bp);
                rc = 0;
        }

        return rc;
}
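
#if 0
/* Usage sketch (hypothetical helper; mirrors the real caller in
 * bnx2x_nic_load() below): issue a load request and decode the reply.
 * A return of 0 means the MCP never answered with our sequence number.
 */
static int bnx2x_example_load_req(struct bnx2x *bp)
{
        u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);

        if (!load_code)
                return -EBUSY;          /* MCP response failure */
        if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
                return -EBUSY;          /* other port in diagnostic mode */

        return 0;               /* load_code tells us which init to run */
}
#endif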

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        pci_free_consistent(bp->pdev, size, x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        vfree(x); \
                        x = NULL; \
                } \
        } while (0)

        int i;

        /* fastpath */
        for_each_queue(bp, i) {

                /* Status blocks */
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
                               bnx2x_fp(bp, i, status_blk_mapping),
                               sizeof(struct host_status_block) +
                               sizeof(struct eth_tx_db_data));

                /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
                BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
                               bnx2x_fp(bp, i, tx_desc_mapping),
                               sizeof(struct eth_tx_bd) * NUM_TX_BD);

                BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
                               bnx2x_fp(bp, i, rx_desc_mapping),
                               sizeof(struct eth_rx_bd) * NUM_RX_BD);

                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
                               bnx2x_fp(bp, i, rx_comp_mapping),
                               sizeof(struct eth_fast_path_rx_cqe) *
                               NUM_RCQ_BD);

                /* SGE ring */
                BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
                               bnx2x_fp(bp, i, rx_sge_mapping),
                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
        }
        /* end of fastpath */

        BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
                       sizeof(struct host_def_status_block));

        BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
                       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
        BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
        BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
        BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
        BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
        BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
        do { \
                x = pci_alloc_consistent(bp->pdev, size, y); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset(x, 0, size); \
        } while (0)

#define BNX2X_ALLOC(x, size) \
        do { \
                x = vmalloc(size); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset(x, 0, size); \
        } while (0)

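/* For reference: pci_alloc_consistent() on this kernel is a thin
 * compatibility wrapper, roughly equivalent to
 *
 *      x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_ATOMIC);
 *
 * and does not guarantee zeroed memory, hence the explicit memset()
 * in the macro above.
 */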
        int i;

        /* fastpath */
        for_each_queue(bp, i) {
                bnx2x_fp(bp, i, bp) = bp;

                /* Status blocks */
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
                                &bnx2x_fp(bp, i, status_blk_mapping),
                                sizeof(struct host_status_block) +
                                sizeof(struct eth_tx_db_data));

                bnx2x_fp(bp, i, hw_tx_prods) =
                                (void *)(bnx2x_fp(bp, i, status_blk) + 1);

                bnx2x_fp(bp, i, tx_prods_mapping) =
                                bnx2x_fp(bp, i, status_blk_mapping) +
                                sizeof(struct host_status_block);
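                /* Note: the status block and the TX doorbell data share
                 * a single DMA allocation - hw_tx_prods lives right
                 * after the status block, so only offsets are computed
                 * above and no second allocation is made.
                 */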

                /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
                BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
                                sizeof(struct sw_tx_bd) * NUM_TX_BD);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
                                &bnx2x_fp(bp, i, tx_desc_mapping),
                                sizeof(struct eth_tx_bd) * NUM_TX_BD);

                BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
                                sizeof(struct sw_rx_bd) * NUM_RX_BD);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
                                &bnx2x_fp(bp, i, rx_desc_mapping),
                                sizeof(struct eth_rx_bd) * NUM_RX_BD);

                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
                                &bnx2x_fp(bp, i, rx_comp_mapping),
                                sizeof(struct eth_fast_path_rx_cqe) *
                                NUM_RCQ_BD);

                /* SGE ring */
                BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
                                sizeof(struct sw_rx_page) * NUM_RX_SGE);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
                                &bnx2x_fp(bp, i, rx_sge_mapping),
                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
        }
        /* end of fastpath */

        BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
                        sizeof(struct host_def_status_block));

        BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
                        sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
        BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

        /* Initialize T1 */
        for (i = 0; i < 64*1024; i += 64) {
                *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
                *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
        }

        /* allocate the searcher T2 table: 1/4 of the T1 size
           (T2 is not entered into the ILT) */
        BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

        /* Initialize T2: chain each 64-byte entry to the next one */
        for (i = 0; i < 16*1024; i += 64)
                *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

        /* fix up the last entry to wrap back to the start of the table */
        *(u64 *)((char *)bp->t2 + 16*1024 - 8) = bp->t2_mapping;

        /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
        BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

        /* QM queues (128*MAX_CONN) */
        BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

        /* Slow path ring */
        BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

        return 0;

alloc_mem_err:
        bnx2x_free_mem(bp);
        return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                u16 bd_cons = fp->tx_bd_cons;
                u16 sw_prod = fp->tx_pkt_prod;
                u16 sw_cons = fp->tx_pkt_cons;

                while (sw_cons != sw_prod) {
                        bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
                        sw_cons++;
                }
        }
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
        int i, j;

        for_each_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                for (i = 0; i < NUM_RX_BD; i++) {
                        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
                        struct sk_buff *skb = rx_buf->skb;

                        if (skb == NULL)
                                continue;

                        pci_unmap_single(bp->pdev,
                                         pci_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_size,
                                         PCI_DMA_FROMDEVICE);

                        rx_buf->skb = NULL;
                        dev_kfree_skb(skb);
                }
                if (!fp->disable_tpa)
                        bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
                                            ETH_MAX_AGGREGATION_QUEUES_E1 :
                                            ETH_MAX_AGGREGATION_QUEUES_E1H);
        }
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
        bnx2x_free_tx_skbs(bp);
        bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
        int i, offset = 1;

        free_irq(bp->msix_table[0].vector, bp->dev);
        DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
           bp->msix_table[0].vector);

        for_each_queue(bp, i) {
                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
                   "state %x\n", i, bp->msix_table[i + offset].vector,
                   bnx2x_fp(bp, i, state));

                if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
                        BNX2X_ERR("IRQ of fp #%d being freed while "
                                  "state != closed\n", i);

                free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
        }
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
        if (bp->flags & USING_MSIX_FLAG) {
                bnx2x_free_msix_irqs(bp);
                pci_disable_msix(bp->pdev);
                bp->flags &= ~USING_MSIX_FLAG;

        } else
                free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
        int i, rc, offset;

        bp->msix_table[0].entry = 0;
        offset = 1;
        DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

        for_each_queue(bp, i) {
                int igu_vec = offset + i + BP_L_ID(bp);

                bp->msix_table[i + offset].entry = igu_vec;
                DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
                   "(fastpath #%u)\n", i + offset, igu_vec, i);
        }

        rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
                             bp->num_queues + offset);
        if (rc) {
                DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
                return -1;
        }
        bp->flags |= USING_MSIX_FLAG;

        return 0;
}
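
/* Note on the vector layout above: entry 0 is the slowpath (default
 * status block) interrupt and entries 1..num_queues are the fastpath
 * queues, hence "offset = 1" here and in the request/free paths.
 * pci_enable_msix() returns 0 on success, a negative errno on failure,
 * or a positive count of vectors actually available; this driver treats
 * any nonzero return as failure rather than retrying with fewer vectors.
 */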

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
        int i, rc, offset = 1;

        rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
                         bp->dev->name, bp->dev);
        if (rc) {
                BNX2X_ERR("request sp irq failed\n");
                return -EBUSY;
        }

        for_each_queue(bp, i) {
                rc = request_irq(bp->msix_table[i + offset].vector,
                                 bnx2x_msix_fp_int, 0,
                                 bp->dev->name, &bp->fp[i]);
                if (rc) {
                        BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
                                  i + offset, -rc);
                        bnx2x_free_msix_irqs(bp);
                        return -EBUSY;
                }

                bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
        }

        return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
        int rc;

        rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
                         bp->dev->name, bp->dev);
        if (!rc)
                bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

        return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i)
                napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i)
                napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        if (bp->state == BNX2X_STATE_OPEN)
                                netif_wake_queue(bp->dev);
                        bnx2x_napi_enable(bp);
                        bnx2x_int_enable(bp);
                }
        }
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
        bnx2x_int_disable_sync(bp, disable_hw);
        if (netif_running(bp->dev)) {
                bnx2x_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
        struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
        int port = BP_PORT(bp);

        /* CAM allocation
         * unicasts: 0-31 port0, 32-63 port1
         * multicast: 64-127 port0, 128-191 port1
         */
        config->hdr.length_6b = 2;
        config->hdr.offset = port ? 32 : 0;
        config->hdr.client_id = BP_CL_ID(bp);
        config->hdr.reserved1 = 0;

        /* primary MAC */
        config->config_table[0].cam_entry.msb_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[0]);
        config->config_table[0].cam_entry.middle_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[2]);
        config->config_table[0].cam_entry.lsb_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[4]);
        config->config_table[0].cam_entry.flags = cpu_to_le16(port);
        if (set)
                config->config_table[0].target_table_entry.flags = 0;
        else
                CAM_INVALIDATE(config->config_table[0]);
        config->config_table[0].target_table_entry.client_id = 0;
        config->config_table[0].target_table_entry.vlan_id = 0;

        DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
           (set ? "setting" : "clearing"),
           config->config_table[0].cam_entry.msb_mac_addr,
           config->config_table[0].cam_entry.middle_mac_addr,
           config->config_table[0].cam_entry.lsb_mac_addr);

        /* broadcast */
        config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
        config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
        config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
        config->config_table[1].cam_entry.flags = cpu_to_le16(port);
        if (set)
                config->config_table[1].target_table_entry.flags =
                                TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
        else
                CAM_INVALIDATE(config->config_table[1]);
        config->config_table[1].target_table_entry.client_id = 0;
        config->config_table[1].target_table_entry.vlan_id = 0;

        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
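
/* Worked example for the swab16() packing above (assuming a
 * little-endian host): for dev_addr = 00:11:22:33:44:55 the three CAM
 * halfwords become msb = 0x0011, middle = 0x2233, lsb = 0x4455, i.e.
 * the MAC is stored as big-endian 16-bit words.
 */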

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
        struct mac_configuration_cmd_e1h *config =
                (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

        if (set && (bp->state != BNX2X_STATE_OPEN)) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
                return;
        }

        /* CAM allocation for E1H
         * unicasts: by func number
         * multicast: 20+FUNC*20, 20 each
         */
        config->hdr.length_6b = 1;
        config->hdr.offset = BP_FUNC(bp);
        config->hdr.client_id = BP_CL_ID(bp);
        config->hdr.reserved1 = 0;

        /* primary MAC */
        config->config_table[0].msb_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[0]);
        config->config_table[0].middle_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[2]);
        config->config_table[0].lsb_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[4]);
        config->config_table[0].client_id = BP_L_ID(bp);
        config->config_table[0].vlan_id = 0;
        config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
        if (set)
                config->config_table[0].flags = BP_PORT(bp);
        else
                config->config_table[0].flags =
                                MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

        DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
           (set ? "setting" : "clearing"),
           config->config_table[0].msb_mac_addr,
           config->config_table[0].middle_mac_addr,
           config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
                             int *state_p, int poll)
{
        /* can take a while if any port is running */
        int cnt = 500;

        DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
           poll ? "polling" : "waiting", state, idx);

        might_sleep();
        while (cnt--) {
                if (poll) {
                        bnx2x_rx_int(bp->fp, 10);
                        /* if the index is different from 0
                         * the reply for some commands will
                         * be on the non-default queue
                         */
                        if (idx)
                                bnx2x_rx_int(&bp->fp[idx], 10);
                }

                mb(); /* state is changed by bnx2x_sp_event() */
                if (*state_p == state)
                        return 0;

                msleep(1);
        }

        /* timeout! */
        BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
                  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
        bnx2x_panic();
#endif

        return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
        int rc;

        /* reset IGU state */
        bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

        /* SETUP ramrod */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

        return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
        /* reset IGU state */
        bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

        /* SETUP ramrod */
        bp->fp[index].state = BNX2X_FP_STATE_OPENING;
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

        /* Wait for completion */
        return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
                                 &(bp->fp[index].state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
        u32 load_code;
        int i, rc;
#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EPERM;
#endif

        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

        /* Send a LOAD_REQUEST command to the MCP.
           The reply tells us which kind of LOAD to perform:
           if we are the first port to be initialized, the common
           blocks should be initialized as well, otherwise not.
        */
        if (!BP_NOMCP(bp)) {
                load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, aborting\n");
                        return -EBUSY;
                }
                if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
                        return -EBUSY; /* other port in diagnostic mode */

        } else {
                int port = BP_PORT(bp);

                DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
                   load_count[0], load_count[1], load_count[2]);
                load_count[0]++;
                load_count[1 + port]++;
                DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
                   load_count[0], load_count[1], load_count[2]);
                if (load_count[0] == 1)
                        load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
                else if (load_count[1 + port] == 1)
                        load_code = FW_MSG_CODE_DRV_LOAD_PORT;
                else
                        load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
        }

        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
            (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
                bp->port.pmf = 1;
        else
                bp->port.pmf = 0;
        DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

        /* if we can't use MSI-X we only need one fastpath,
         * so try to enable MSI-X with the requested number of fastpaths
         * and fall back to INT#A with one fastpath
         */
        if (use_inta) {
                bp->num_queues = 1;

        } else {
                if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
                        /* user requested number */
                        bp->num_queues = use_multi;

                else if (use_multi)
                        bp->num_queues = min_t(u32, num_online_cpus(),
                                               BP_MAX_QUEUES(bp));
                else
                        bp->num_queues = 1;

                if (bnx2x_enable_msix(bp)) {
                        /* failed to enable MSI-X */
                        bp->num_queues = 1;
                        if (use_multi)
                                BNX2X_ERR("Multi requested but failed"
                                          " to enable MSI-X\n");
                }
        }
        DP(NETIF_MSG_IFUP,
           "set number of queues to %d\n", bp->num_queues);

        if (bnx2x_alloc_mem(bp))
                return -ENOMEM;

        for_each_queue(bp, i)
                bnx2x_fp(bp, i, disable_tpa) =
                                        ((bp->flags & TPA_ENABLE_FLAG) == 0);

        if (bp->flags & USING_MSIX_FLAG) {
                rc = bnx2x_req_msix_irqs(bp);
                if (rc) {
                        pci_disable_msix(bp->pdev);
                        goto load_error;
                }
        } else {
                bnx2x_ack_int(bp);
                rc = bnx2x_req_irq(bp);
                if (rc) {
                        BNX2X_ERR("IRQ request failed, aborting\n");
                        goto load_error;
                }
        }

        for_each_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, 128);

        /* Initialize HW */
        rc = bnx2x_init_hw(bp, load_code);
        if (rc) {
                BNX2X_ERR("HW init failed, aborting\n");
                goto load_int_disable;
        }

        /* Setup NIC internals and enable interrupts */
        bnx2x_nic_init(bp, load_code);

        /* Send LOAD_DONE command to MCP */
        if (!BP_NOMCP(bp)) {
                load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, aborting\n");
                        rc = -EBUSY;
                        goto load_rings_free;
                }
        }

        bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

        rc = bnx2x_setup_leading(bp);
        if (rc) {
                BNX2X_ERR("Setup leading failed!\n");
                goto load_netif_stop;
        }

        if (CHIP_IS_E1H(bp))
                if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
                        BNX2X_ERR("!!!  mf_cfg function disabled\n");
                        bp->state = BNX2X_STATE_DISABLED;
                }

        if (bp->state == BNX2X_STATE_OPEN)
                for_each_nondefault_queue(bp, i) {
                        rc = bnx2x_setup_multi(bp, i);
                        if (rc)
                                goto load_netif_stop;
                }

        if (CHIP_IS_E1(bp))
                bnx2x_set_mac_addr_e1(bp, 1);
        else
                bnx2x_set_mac_addr_e1h(bp, 1);

        if (bp->port.pmf)
                bnx2x_initial_phy_init(bp);

        /* Start fast path */
        switch (load_mode) {
        case LOAD_NORMAL:
                /* Tx queue should only be re-enabled */
                netif_wake_queue(bp->dev);
                bnx2x_set_rx_mode(bp->dev);
                break;

        case LOAD_OPEN:
                netif_start_queue(bp->dev);
                bnx2x_set_rx_mode(bp->dev);
                if (bp->flags & USING_MSIX_FLAG)
                        printk(KERN_INFO PFX "%s: using MSI-X\n",
                               bp->dev->name);
                break;

        case LOAD_DIAG:
                bnx2x_set_rx_mode(bp->dev);
                bp->state = BNX2X_STATE_DIAG;
                break;

        default:
                break;
        }

        if (!bp->port.pmf)
                bnx2x__link_status_update(bp);

        /* start the timer */
        mod_timer(&bp->timer, jiffies + bp->current_interval);

        return 0;

load_netif_stop:
        bnx2x_napi_disable(bp);
load_rings_free:
        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_int_disable:
        bnx2x_int_disable_sync(bp, 1);
        /* Release IRQs */
        bnx2x_free_irq(bp);
load_error:
        bnx2x_free_mem(bp);
        bp->port.pmf = 0;

        /* TBD: we really need to reset the chip
           if we want to recover from this */
        return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
        int rc;

        /* halt the connection */
        bp->fp[index].state = BNX2X_FP_STATE_HALTING;
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
                               &(bp->fp[index].state), 1);
        if (rc) /* timeout */
                return rc;

        /* delete cfc entry */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
                               &(bp->fp[index].state), 1);
        return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
        u16 dsb_sp_prod_idx;
        /* if the other port is handling traffic,
           this can take a lot of time */
        int cnt = 500;
        int rc;

        might_sleep();

        /* Send HALT ramrod */
        bp->fp[0].state = BNX2X_FP_STATE_HALTING;
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
                               &(bp->fp[0].state), 1);
        if (rc) /* timeout */
                return rc;

        dsb_sp_prod_idx = *bp->dsb_sp_prod;

        /* Send PORT_DELETE ramrod */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

        /* Wait for the completion to arrive on the default status block.
           We are going to reset the chip anyway, so there is not much
           to do if this times out.
         */
        while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
                if (!cnt) {
                        DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
                           "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
                           *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
#else
                        rc = -EBUSY;
#endif
                        break;
                }
                cnt--;
                msleep(1);
        }
        bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
        bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

        return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int base, i;

        /* Configure IGU */
        REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

        REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

        /* Clear ILT */
        base = FUNC_ILT_BASE(func);
        for (i = base; i < base + ILT_PER_FUNC; i++)
                bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 val;

        REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

        /* Do not rcv packets to BRB */
        REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
        /* Do not direct rcv packets that are not for MCP to the BRB */
        REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                           NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

        /* Configure AEU */
        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

        msleep(100);
        /* Check for BRB port occupancy */
        val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
        if (val)
                DP(NETIF_MSG_IFDOWN,
                   "BRB1 is not empty  %d blocks are occupied\n", val);

        /* TODO: Close Doorbell port? */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
        /* reset_common */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
               0xd3ffff7f);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
        DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
           BP_FUNC(bp), reset_code);

        switch (reset_code) {
        case FW_MSG_CODE_DRV_UNLOAD_COMMON:
                bnx2x_reset_port(bp);
                bnx2x_reset_func(bp);
                bnx2x_reset_common(bp);
                break;

        case FW_MSG_CODE_DRV_UNLOAD_PORT:
                bnx2x_reset_port(bp);
                bnx2x_reset_func(bp);
                break;

        case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
                bnx2x_reset_func(bp);
                break;

        default:
                BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
                break;
        }
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
        int port = BP_PORT(bp);
        u32 reset_code = 0;
        int i, cnt, rc;

        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

        bp->rx_mode = BNX2X_RX_MODE_NONE;
        bnx2x_set_storm_rx_mode(bp);

        bnx2x_netif_stop(bp, 1);
        if (!netif_running(bp->dev))
                bnx2x_napi_disable(bp);
        del_timer_sync(&bp->timer);
        SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
                 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
        bnx2x_stats_handle(bp, STATS_EVENT_STOP);

        /* Release IRQs */
        bnx2x_free_irq(bp);

        /* Wait until all tx fast path tasks complete */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                cnt = 1000;
                smp_rmb();
                while (bnx2x_has_tx_work(fp)) {

                        bnx2x_tx_int(fp, 1000);
                        if (!cnt) {
                                BNX2X_ERR("timeout waiting for queue[%d]\n",
                                          i);
#ifdef BNX2X_STOP_ON_ERROR
                                bnx2x_panic();
                                return -EBUSY;
#else
                                break;
#endif
                        }
                        cnt--;
                        msleep(1);
                        smp_rmb();
                }
        }
        /* Give the HW time to discard old tx messages */
        msleep(1);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                bnx2x_set_mac_addr_e1(bp, 0);

                for (i = 0; i < config->hdr.length_6b; i++)
                        CAM_INVALIDATE(config->config_table[i]);

                config->hdr.length_6b = i;
                if (CHIP_REV_IS_SLOW(bp))
                        config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
                else
                        config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
                config->hdr.client_id = BP_CL_ID(bp);
                config->hdr.reserved1 = 0;

                bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                              U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
                              U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

        } else { /* E1H */
                REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

                bnx2x_set_mac_addr_e1h(bp, 0);

                for (i = 0; i < MC_HASH_SIZE; i++)
                        REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
        }
6757
6758         if (unload_mode == UNLOAD_NORMAL)
6759                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6760
6761         else if (bp->flags & NO_WOL_FLAG) {
6762                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6763                 if (CHIP_IS_E1H(bp))
6764                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6765
6766         } else if (bp->wol) {
6767                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6768                 u8 *mac_addr = bp->dev->dev_addr;
6769                 u32 val;
6770                 /* The mac address is written to entries 1-4 to
6771                    preserve entry 0, which is used by the PMF */
6772                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6773
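                     /* e.g. an illustrative MAC 00:10:18:aa:bb:cc is packed
                      * as the two match words 0x00000010 and 0x18aabbcc
                      */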
6774                 val = (mac_addr[0] << 8) | mac_addr[1];
6775                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6776
6777                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6778                       (mac_addr[4] << 8) | mac_addr[5];
6779                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6780
6781                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6782
6783         } else
6784                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6785
6786         /* Close the multi and leading connections.
6787            Completions for the ramrods are collected synchronously */
6788         for_each_nondefault_queue(bp, i)
6789                 if (bnx2x_stop_multi(bp, i))
6790                         goto unload_error;
6791
6792         rc = bnx2x_stop_leading(bp);
6793         if (rc) {
6794                 BNX2X_ERR("Stop leading failed!\n");
6795 #ifdef BNX2X_STOP_ON_ERROR
6796                 return -EBUSY;
6797 #else
6798                 goto unload_error;
6799 #endif
6800         }
6801
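             /* With an MCP the firmware decides how deep the unload goes
              * (function, port or common); without one, mimic that
              * decision using the driver-global load counts below.
              */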
6802 unload_error:
6803         if (!BP_NOMCP(bp))
6804                 reset_code = bnx2x_fw_command(bp, reset_code);
6805         else {
6806                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6807                    load_count[0], load_count[1], load_count[2]);
6808                 load_count[0]--;
6809                 load_count[1 + port]--;
6810                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6811                    load_count[0], load_count[1], load_count[2]);
6812                 if (load_count[0] == 0)
6813                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6814                 else if (load_count[1 + port] == 0)
6815                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6816                 else
6817                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6818         }
6819
6820         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6821             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6822                 bnx2x__link_reset(bp);
6823
6824         /* Reset the chip */
6825         bnx2x_reset_chip(bp, reset_code);
6826
6827         /* Report UNLOAD_DONE to MCP */
6828         if (!BP_NOMCP(bp))
6829                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6830         bp->port.pmf = 0;
6831
6832         /* Free SKBs, SGEs, TPA pool and driver internals */
6833         bnx2x_free_skbs(bp);
6834         for_each_queue(bp, i)
6835                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6836         bnx2x_free_mem(bp);
6837
6838         bp->state = BNX2X_STATE_CLOSED;
6839
6840         netif_carrier_off(bp->dev);
6841
6842         return 0;
6843 }
6844
6845 static void bnx2x_reset_task(struct work_struct *work)
6846 {
6847         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6848
6849 #ifdef BNX2X_STOP_ON_ERROR
6850         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6851                   " so reset not done to allow debug dump,\n"
6852                   KERN_ERR " you will need to reboot when done\n");
6853         return;
6854 #endif
6855
6856         rtnl_lock();
6857
6858         if (!netif_running(bp->dev))
6859                 goto reset_task_exit;
6860
6861         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6862         bnx2x_nic_load(bp, LOAD_NORMAL);
6863
6864 reset_task_exit:
6865         rtnl_unlock();
6866 }
6867
6868 /* end of nic load/unload */
6869
6870 /* ethtool_ops */
6871
6872 /*
6873  * Init service functions
6874  */
6875
6876 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6877 {
6878         u32 val;
6879
6880         /* Check if there is any driver already loaded */
6881         val = REG_RD(bp, MISC_REG_UNPREPARED);
6882         if (val == 0x1) {
6883                 /* Check if it is the UNDI driver:
6884                  * UNDI initializes the CID offset for the normal doorbell to 0x7
6885                  */
6886                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6887                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6888                 if (val == 0x7) {
6889                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6890                         /* save our func */
6891                         int func = BP_FUNC(bp);
6892                         u32 swap_en;
6893                         u32 swap_val;
6894
6895                         /* clear the UNDI indication */
6896                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6897
6898                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6899
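                             /* Recovery: gracefully unload UNDI via the
                              * MCP on both ports, then hard-reset the
                              * chip while preserving the NIG port-swap
                              * straps saved and restored below.
                              */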
6900                         /* try to unload UNDI on port 0 */
6901                         bp->func = 0;
6902                         bp->fw_seq =
6903                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6904                                 DRV_MSG_SEQ_NUMBER_MASK);
6905                         reset_code = bnx2x_fw_command(bp, reset_code);
6906
6907                         /* if UNDI is loaded on the other port */
6908                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6909
6910                                 /* send "DONE" for previous unload */
6911                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6912
6913                                 /* unload UNDI on port 1 */
6914                                 bp->func = 1;
6915                                 bp->fw_seq =
6916                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6917                                         DRV_MSG_SEQ_NUMBER_MASK);
6918                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6919
6920                                 bnx2x_fw_command(bp, reset_code);
6921                         }
6922
6923                         /* now it's safe to release the lock */
6924                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6925
6926                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6927                                     HC_REG_CONFIG_0), 0x1000);
6928
6929                         /* close input traffic and wait for it to drain */
6930                         /* Do not rcv packets to BRB */
6931                         REG_WR(bp,
6932                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6933                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6934                         /* Do not direct rcv packets that are not for MCP to
6935                          * the BRB */
6936                         REG_WR(bp,
6937                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6938                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6939                         /* clear AEU */
6940                         REG_WR(bp,
6941                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6942                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6943                         msleep(10);
6944
6945                         /* save NIG port swap info */
6946                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6947                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6948                         /* reset device */
6949                         REG_WR(bp,
6950                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6951                                0xd3ffffff);
6952                         REG_WR(bp,
6953                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6954                                0x1403);
6955                         /* take the NIG out of reset and restore swap values */
6956                         REG_WR(bp,
6957                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6958                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6959                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6960                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6961
6962                         /* send unload done to the MCP */
6963                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6964
6965                         /* restore our func and fw_seq */
6966                         bp->func = func;
6967                         bp->fw_seq =
6968                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6969                                 DRV_MSG_SEQ_NUMBER_MASK);
6970
6971                 } else
6972                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6973         }
6974 }
6975
6976 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6977 {
6978         u32 val, val2, val3, val4, id;
6979         u16 pmc;
6980
6981         /* Get the chip revision id and number. */
6982         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
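             /* e.g. a hypothetical 57710 A0 part would yield 0x164e0000 */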
6983         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6984         id = ((val & 0xffff) << 16);
6985         val = REG_RD(bp, MISC_REG_CHIP_REV);
6986         id |= ((val & 0xf) << 12);
6987         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6988         id |= ((val & 0xff) << 4);
6989         val = REG_RD(bp, MISC_REG_BOND_ID);
6990         id |= (val & 0xf);
6991         bp->common.chip_id = id;
6992         bp->link_params.chip_id = bp->common.chip_id;
6993         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6994
6995         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6996         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6997                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6998         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6999                        bp->common.flash_size, bp->common.flash_size);
7000
7001         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7002         bp->link_params.shmem_base = bp->common.shmem_base;
7003         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7004
7005         if (!bp->common.shmem_base ||
7006             (bp->common.shmem_base < 0xA0000) ||
7007             (bp->common.shmem_base >= 0xC0000)) {
7008                 BNX2X_DEV_INFO("MCP not active\n");
7009                 bp->flags |= NO_MCP_FLAG;
7010                 return;
7011         }
7012
7013         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7014         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7015                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7016                 BNX2X_ERR("BAD MCP validity signature\n");
7017
7018         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7019         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7020
7021         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
7022                        bp->common.hw_config, bp->common.board);
7023
7024         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7025                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7026                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7027
7028         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7029         bp->common.bc_ver = val;
7030         BNX2X_DEV_INFO("bc_ver %X\n", val);
7031         if (val < BNX2X_BC_VER) {
7032                 /* for now, only warn;
7033                  * later we might need to enforce this */
7034                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7035                           " please upgrade BC\n", BNX2X_BC_VER, val);
7036         }
7037
7038         if (BP_E1HVN(bp) == 0) {
7039                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7040                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7041         } else {
7042                 /* no WOL capability for E1HVN != 0 */
7043                 bp->flags |= NO_WOL_FLAG;
7044         }
7045         BNX2X_DEV_INFO("%sWoL capable\n",
7046                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7047
7048         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7049         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7050         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7051         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7052
7053         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7054                val, val2, val3, val4);
7055 }
7056
7057 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7058                                                     u32 switch_cfg)
7059 {
7060         int port = BP_PORT(bp);
7061         u32 ext_phy_type;
7062
7063         switch (switch_cfg) {
7064         case SWITCH_CFG_1G:
7065                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7066
7067                 ext_phy_type =
7068                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7069                 switch (ext_phy_type) {
7070                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7071                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7072                                        ext_phy_type);
7073
7074                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7075                                                SUPPORTED_10baseT_Full |
7076                                                SUPPORTED_100baseT_Half |
7077                                                SUPPORTED_100baseT_Full |
7078                                                SUPPORTED_1000baseT_Full |
7079                                                SUPPORTED_2500baseX_Full |
7080                                                SUPPORTED_TP |
7081                                                SUPPORTED_FIBRE |
7082                                                SUPPORTED_Autoneg |
7083                                                SUPPORTED_Pause |
7084                                                SUPPORTED_Asym_Pause);
7085                         break;
7086
7087                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7088                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7089                                        ext_phy_type);
7090
7091                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7092                                                SUPPORTED_10baseT_Full |
7093                                                SUPPORTED_100baseT_Half |
7094                                                SUPPORTED_100baseT_Full |
7095                                                SUPPORTED_1000baseT_Full |
7096                                                SUPPORTED_TP |
7097                                                SUPPORTED_FIBRE |
7098                                                SUPPORTED_Autoneg |
7099                                                SUPPORTED_Pause |
7100                                                SUPPORTED_Asym_Pause);
7101                         break;
7102
7103                 default:
7104                         BNX2X_ERR("NVRAM config error. "
7105                                   "BAD SerDes ext_phy_config 0x%x\n",
7106                                   bp->link_params.ext_phy_config);
7107                         return;
7108                 }
7109
7110                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7111                                            port*0x10);
7112                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7113                 break;
7114
7115         case SWITCH_CFG_10G:
7116                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7117
7118                 ext_phy_type =
7119                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7120                 switch (ext_phy_type) {
7121                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7122                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7123                                        ext_phy_type);
7124
7125                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7126                                                SUPPORTED_10baseT_Full |
7127                                                SUPPORTED_100baseT_Half |
7128                                                SUPPORTED_100baseT_Full |
7129                                                SUPPORTED_1000baseT_Full |
7130                                                SUPPORTED_2500baseX_Full |
7131                                                SUPPORTED_10000baseT_Full |
7132                                                SUPPORTED_TP |
7133                                                SUPPORTED_FIBRE |
7134                                                SUPPORTED_Autoneg |
7135                                                SUPPORTED_Pause |
7136                                                SUPPORTED_Asym_Pause);
7137                         break;
7138
7139                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7140                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7141                                        ext_phy_type);
7142
7143                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7144                                                SUPPORTED_FIBRE |
7145                                                SUPPORTED_Pause |
7146                                                SUPPORTED_Asym_Pause);
7147                         break;
7148
7149                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7150                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7151                                        ext_phy_type);
7152
7153                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7154                                                SUPPORTED_1000baseT_Full |
7155                                                SUPPORTED_FIBRE |
7156                                                SUPPORTED_Pause |
7157                                                SUPPORTED_Asym_Pause);
7158                         break;
7159
7160                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7161                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7162                                        ext_phy_type);
7163
7164                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7165                                                SUPPORTED_1000baseT_Full |
7166                                                SUPPORTED_FIBRE |
7167                                                SUPPORTED_Autoneg |
7168                                                SUPPORTED_Pause |
7169                                                SUPPORTED_Asym_Pause);
7170                         break;
7171
7172                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7173                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7174                                        ext_phy_type);
7175
7176                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7177                                                SUPPORTED_2500baseX_Full |
7178                                                SUPPORTED_1000baseT_Full |
7179                                                SUPPORTED_FIBRE |
7180                                                SUPPORTED_Autoneg |
7181                                                SUPPORTED_Pause |
7182                                                SUPPORTED_Asym_Pause);
7183                         break;
7184
7185                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7186                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7187                                        ext_phy_type);
7188
7189                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7190                                                SUPPORTED_TP |
7191                                                SUPPORTED_Autoneg |
7192                                                SUPPORTED_Pause |
7193                                                SUPPORTED_Asym_Pause);
7194                         break;
7195
7196                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7197                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7198                                   bp->link_params.ext_phy_config);
7199                         break;
7200
7201                 default:
7202                         BNX2X_ERR("NVRAM config error. "
7203                                   "BAD XGXS ext_phy_config 0x%x\n",
7204                                   bp->link_params.ext_phy_config);
7205                         return;
7206                 }
7207
7208                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7209                                            port*0x18);
7210                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7211
7212                 break;
7213
7214         default:
7215                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7216                           bp->port.link_config);
7217                 return;
7218         }
7219         bp->link_params.phy_addr = bp->port.phy_addr;
7220
7221         /* mask what we support according to speed_cap_mask */
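             /* i.e. the final mask is the intersection of what the PHY
              * supports and what the NVRAM speed_cap_mask allows
              */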
7222         if (!(bp->link_params.speed_cap_mask &
7223                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7224                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7225
7226         if (!(bp->link_params.speed_cap_mask &
7227                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7228                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7229
7230         if (!(bp->link_params.speed_cap_mask &
7231                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7232                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7233
7234         if (!(bp->link_params.speed_cap_mask &
7235                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7236                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7237
7238         if (!(bp->link_params.speed_cap_mask &
7239                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7240                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7241                                         SUPPORTED_1000baseT_Full);
7242
7243         if (!(bp->link_params.speed_cap_mask &
7244                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7245                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7246
7247         if (!(bp->link_params.speed_cap_mask &
7248                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7249                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7250
7251         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7252 }
7253
7254 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7255 {
7256         bp->link_params.req_duplex = DUPLEX_FULL;
7257
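             /* the NVRAM link_config either requests autoneg (advertise
              * all supported modes) or forces one speed/duplex; a setting
              * the port cannot honour is reported as an NVRAM config error
              */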
7258         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7259         case PORT_FEATURE_LINK_SPEED_AUTO:
7260                 if (bp->port.supported & SUPPORTED_Autoneg) {
7261                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7262                         bp->port.advertising = bp->port.supported;
7263                 } else {
7264                         u32 ext_phy_type =
7265                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7266
7267                         if ((ext_phy_type ==
7268                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7269                             (ext_phy_type ==
7270                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7271                                 /* force 10G, no AN */
7272                                 bp->link_params.req_line_speed = SPEED_10000;
7273                                 bp->port.advertising =
7274                                                 (ADVERTISED_10000baseT_Full |
7275                                                  ADVERTISED_FIBRE);
7276                                 break;
7277                         }
7278                         BNX2X_ERR("NVRAM config error. "
7279                                   "Invalid link_config 0x%x"
7280                                   "  Autoneg not supported\n",
7281                                   bp->port.link_config);
7282                         return;
7283                 }
7284                 break;
7285
7286         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7287                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7288                         bp->link_params.req_line_speed = SPEED_10;
7289                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7290                                                 ADVERTISED_TP);
7291                 } else {
7292                         BNX2X_ERR("NVRAM config error. "
7293                                   "Invalid link_config 0x%x"
7294                                   "  speed_cap_mask 0x%x\n",
7295                                   bp->port.link_config,
7296                                   bp->link_params.speed_cap_mask);
7297                         return;
7298                 }
7299                 break;
7300
7301         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7302                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7303                         bp->link_params.req_line_speed = SPEED_10;
7304                         bp->link_params.req_duplex = DUPLEX_HALF;
7305                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7306                                                 ADVERTISED_TP);
7307                 } else {
7308                         BNX2X_ERR("NVRAM config error. "
7309                                   "Invalid link_config 0x%x"
7310                                   "  speed_cap_mask 0x%x\n",
7311                                   bp->port.link_config,
7312                                   bp->link_params.speed_cap_mask);
7313                         return;
7314                 }
7315                 break;
7316
7317         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7318                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7319                         bp->link_params.req_line_speed = SPEED_100;
7320                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7321                                                 ADVERTISED_TP);
7322                 } else {
7323                         BNX2X_ERR("NVRAM config error. "
7324                                   "Invalid link_config 0x%x"
7325                                   "  speed_cap_mask 0x%x\n",
7326                                   bp->port.link_config,
7327                                   bp->link_params.speed_cap_mask);
7328                         return;
7329                 }
7330                 break;
7331
7332         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7333                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7334                         bp->link_params.req_line_speed = SPEED_100;
7335                         bp->link_params.req_duplex = DUPLEX_HALF;
7336                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7337                                                 ADVERTISED_TP);
7338                 } else {
7339                         BNX2X_ERR("NVRAM config error. "
7340                                   "Invalid link_config 0x%x"
7341                                   "  speed_cap_mask 0x%x\n",
7342                                   bp->port.link_config,
7343                                   bp->link_params.speed_cap_mask);
7344                         return;
7345                 }
7346                 break;
7347
7348         case PORT_FEATURE_LINK_SPEED_1G:
7349                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7350                         bp->link_params.req_line_speed = SPEED_1000;
7351                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7352                                                 ADVERTISED_TP);
7353                 } else {
7354                         BNX2X_ERR("NVRAM config error. "
7355                                   "Invalid link_config 0x%x"
7356                                   "  speed_cap_mask 0x%x\n",
7357                                   bp->port.link_config,
7358                                   bp->link_params.speed_cap_mask);
7359                         return;
7360                 }
7361                 break;
7362
7363         case PORT_FEATURE_LINK_SPEED_2_5G:
7364                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7365                         bp->link_params.req_line_speed = SPEED_2500;
7366                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7367                                                 ADVERTISED_TP);
7368                 } else {
7369                         BNX2X_ERR("NVRAM config error. "
7370                                   "Invalid link_config 0x%x"
7371                                   "  speed_cap_mask 0x%x\n",
7372                                   bp->port.link_config,
7373                                   bp->link_params.speed_cap_mask);
7374                         return;
7375                 }
7376                 break;
7377
7378         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7379         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7380         case PORT_FEATURE_LINK_SPEED_10G_KR:
7381                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7382                         bp->link_params.req_line_speed = SPEED_10000;
7383                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7384                                                 ADVERTISED_FIBRE);
7385                 } else {
7386                         BNX2X_ERR("NVRAM config error. "
7387                                   "Invalid link_config 0x%x"
7388                                   "  speed_cap_mask 0x%x\n",
7389                                   bp->port.link_config,
7390                                   bp->link_params.speed_cap_mask);
7391                         return;
7392                 }
7393                 break;
7394
7395         default:
7396                 BNX2X_ERR("NVRAM config error. "
7397                           "BAD link speed link_config 0x%x\n",
7398                           bp->port.link_config);
7399                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7400                 bp->port.advertising = bp->port.supported;
7401                 break;
7402         }
7403
7404         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7405                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7406         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7407             !(bp->port.supported & SUPPORTED_Autoneg))
7408                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7409
7410         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7411                        "  advertising 0x%x\n",
7412                        bp->link_params.req_line_speed,
7413                        bp->link_params.req_duplex,
7414                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7415 }
7416
7417 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7418 {
7419         int port = BP_PORT(bp);
7420         u32 val, val2;
7421
7422         bp->link_params.bp = bp;
7423         bp->link_params.port = port;
7424
7425         bp->link_params.serdes_config =
7426                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7427         bp->link_params.lane_config =
7428                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7429         bp->link_params.ext_phy_config =
7430                 SHMEM_RD(bp,
7431                          dev_info.port_hw_config[port].external_phy_config);
7432         bp->link_params.speed_cap_mask =
7433                 SHMEM_RD(bp,
7434                          dev_info.port_hw_config[port].speed_capability_mask);
7435
7436         bp->port.link_config =
7437                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7438
7439         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7440              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7441                        "  link_config 0x%08x\n",
7442                        bp->link_params.serdes_config,
7443                        bp->link_params.lane_config,
7444                        bp->link_params.ext_phy_config,
7445                        bp->link_params.speed_cap_mask, bp->port.link_config);
7446
7447         bp->link_params.switch_cfg = (bp->port.link_config &
7448                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7449         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7450
7451         bnx2x_link_settings_requested(bp);
7452
7453         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7454         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
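             /* mac_upper carries MAC bytes 0-1 in its low 16 bits and
              * mac_lower carries bytes 2-5, most significant byte first
              */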
7455         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7456         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7457         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7458         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7459         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7460         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7461         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7462         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7463 }
7464
7465 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7466 {
7467         int func = BP_FUNC(bp);
7468         u32 val, val2;
7469         int rc = 0;
7470
7471         bnx2x_get_common_hwinfo(bp);
7472
7473         bp->e1hov = 0;
7474         bp->e1hmf = 0;
7475         if (CHIP_IS_E1H(bp)) {
7476                 bp->mf_config =
7477                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7478
7479                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7480                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7481                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7482
7483                         bp->e1hov = val;
7484                         bp->e1hmf = 1;
7485                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7486                                        "(0x%04x)\n",
7487                                        func, bp->e1hov, bp->e1hov);
7488                 } else {
7489                         BNX2X_DEV_INFO("Single function mode\n");
7490                         if (BP_E1HVN(bp)) {
7491                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7492                                           "  aborting\n", func);
7493                                 rc = -EPERM;
7494                         }
7495                 }
7496         }
7497
7498         if (!BP_NOMCP(bp)) {
7499                 bnx2x_get_port_hwinfo(bp);
7500
7501                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7502                               DRV_MSG_SEQ_NUMBER_MASK);
7503                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7504         }
7505
7506         if (IS_E1HMF(bp)) {
7507                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7508                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7509                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7510                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7511                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7512                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7513                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7514                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7515                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7516                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7517                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7518                                ETH_ALEN);
7519                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7520                                ETH_ALEN);
7521                 }
7522
7523                 return rc;
7524         }
7525
7526         if (BP_NOMCP(bp)) {
7527                 /* only supposed to happen on emulation/FPGA */
7528                 BNX2X_ERR("warning: random MAC workaround active\n");
7529                 random_ether_addr(bp->dev->dev_addr);
7530                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7531         }
7532
7533         return rc;
7534 }
7535
7536 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7537 {
7538         int func = BP_FUNC(bp);
7539         int rc;
7540
7541         /* Disable interrupt handling until HW is initialized */
7542         atomic_set(&bp->intr_sem, 1);
7543
7544         mutex_init(&bp->port.phy_mutex);
7545
7546         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7547         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7548
7549         rc = bnx2x_get_hwinfo(bp);
7550
7551         /* need to reset chip if undi was active */
7552         if (!BP_NOMCP(bp))
7553                 bnx2x_undi_unload(bp);
7554
7555         if (CHIP_REV_IS_FPGA(bp))
7556                 printk(KERN_ERR PFX "FPGA detected\n");
7557
7558         if (BP_NOMCP(bp) && (func == 0))
7559                 printk(KERN_ERR PFX
7560                        "MCP disabled, must load devices in order!\n");
7561
7562         /* Set TPA flags */
7563         if (disable_tpa) {
7564                 bp->flags &= ~TPA_ENABLE_FLAG;
7565                 bp->dev->features &= ~NETIF_F_LRO;
7566         } else {
7567                 bp->flags |= TPA_ENABLE_FLAG;
7568                 bp->dev->features |= NETIF_F_LRO;
7569         }
7570
7571
7572         bp->tx_ring_size = MAX_TX_AVAIL;
7573         bp->rx_ring_size = MAX_RX_AVAIL;
7574
7575         bp->rx_csum = 1;
7576         bp->rx_offset = 0;
7577
7578         bp->tx_ticks = 50;
7579         bp->rx_ticks = 25;
7580
7581         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7582         bp->current_interval = (poll ? poll : bp->timer_interval);
7583
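             /* arm the periodic driver timer; a non-zero 'poll' module
              * parameter overrides the default interval above
              */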
7584         init_timer(&bp->timer);
7585         bp->timer.expires = jiffies + bp->current_interval;
7586         bp->timer.data = (unsigned long) bp;
7587         bp->timer.function = bnx2x_timer;
7588
7589         return rc;
7590 }
7591
7592 /*
7593  * ethtool service functions
7594  */
7595
7596 /* All ethtool functions called with rtnl_lock */
7597
7598 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7599 {
7600         struct bnx2x *bp = netdev_priv(dev);
7601
7602         cmd->supported = bp->port.supported;
7603         cmd->advertising = bp->port.advertising;
7604
7605         if (netif_carrier_ok(dev)) {
7606                 cmd->speed = bp->link_vars.line_speed;
7607                 cmd->duplex = bp->link_vars.duplex;
7608         } else {
7609                 cmd->speed = bp->link_params.req_line_speed;
7610                 cmd->duplex = bp->link_params.req_duplex;
7611         }
7612         if (IS_E1HMF(bp)) {
7613                 u16 vn_max_rate;
7614
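                     /* the MF max-BW field is in units of 100 Mbps, so
                      * cap the reported speed at the per-VN maximum
                      */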
7615                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7616                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7617                 if (vn_max_rate < cmd->speed)
7618                         cmd->speed = vn_max_rate;
7619         }
7620
7621         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7622                 u32 ext_phy_type =
7623                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7624
7625                 switch (ext_phy_type) {
7626                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7627                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7628                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7629                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7630                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7631                         cmd->port = PORT_FIBRE;
7632                         break;
7633
7634                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7635                         cmd->port = PORT_TP;
7636                         break;
7637
7638                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7639                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7640                                   bp->link_params.ext_phy_config);
7641                         break;
7642
7643                 default:
7644                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7645                            bp->link_params.ext_phy_config);
7646                         break;
7647                 }
7648         } else
7649                 cmd->port = PORT_TP;
7650
7651         cmd->phy_address = bp->port.phy_addr;
7652         cmd->transceiver = XCVR_INTERNAL;
7653
7654         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7655                 cmd->autoneg = AUTONEG_ENABLE;
7656         else
7657                 cmd->autoneg = AUTONEG_DISABLE;
7658
7659         cmd->maxtxpkt = 0;
7660         cmd->maxrxpkt = 0;
7661
7662         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7663            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7664            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7665            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7666            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7667            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7668            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7669
7670         return 0;
7671 }
7672
7673 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7674 {
7675         struct bnx2x *bp = netdev_priv(dev);
7676         u32 advertising;
7677
7678         if (IS_E1HMF(bp))
7679                 return 0;
7680
7681         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7682            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7683            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7684            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7685            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7686            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7687            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7688
7689         if (cmd->autoneg == AUTONEG_ENABLE) {
7690                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7691                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7692                         return -EINVAL;
7693                 }
7694
7695                 /* advertise the requested speed and duplex if supported */
7696                 cmd->advertising &= bp->port.supported;
7697
7698                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7699                 bp->link_params.req_duplex = DUPLEX_FULL;
7700                 bp->port.advertising |= (ADVERTISED_Autoneg |
7701                                          cmd->advertising);
7702
7703         } else { /* forced speed */
7704                 /* advertise the requested speed and duplex if supported */
7705                 switch (cmd->speed) {
7706                 case SPEED_10:
7707                         if (cmd->duplex == DUPLEX_FULL) {
7708                                 if (!(bp->port.supported &
7709                                       SUPPORTED_10baseT_Full)) {
7710                                         DP(NETIF_MSG_LINK,
7711                                            "10M full not supported\n");
7712                                         return -EINVAL;
7713                                 }
7714
7715                                 advertising = (ADVERTISED_10baseT_Full |
7716                                                ADVERTISED_TP);
7717                         } else {
7718                                 if (!(bp->port.supported &
7719                                       SUPPORTED_10baseT_Half)) {
7720                                         DP(NETIF_MSG_LINK,
7721                                            "10M half not supported\n");
7722                                         return -EINVAL;
7723                                 }
7724
7725                                 advertising = (ADVERTISED_10baseT_Half |
7726                                                ADVERTISED_TP);
7727                         }
7728                         break;
7729
7730                 case SPEED_100:
7731                         if (cmd->duplex == DUPLEX_FULL) {
7732                                 if (!(bp->port.supported &
7733                                                 SUPPORTED_100baseT_Full)) {
7734                                         DP(NETIF_MSG_LINK,
7735                                            "100M full not supported\n");
7736                                         return -EINVAL;
7737                                 }
7738
7739                                 advertising = (ADVERTISED_100baseT_Full |
7740                                                ADVERTISED_TP);
7741                         } else {
7742                                 if (!(bp->port.supported &
7743                                                 SUPPORTED_100baseT_Half)) {
7744                                         DP(NETIF_MSG_LINK,
7745                                            "100M half not supported\n");
7746                                         return -EINVAL;
7747                                 }
7748
7749                                 advertising = (ADVERTISED_100baseT_Half |
7750                                                ADVERTISED_TP);
7751                         }
7752                         break;
7753
7754                 case SPEED_1000:
7755                         if (cmd->duplex != DUPLEX_FULL) {
7756                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7757                                 return -EINVAL;
7758                         }
7759
7760                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7761                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7762                                 return -EINVAL;
7763                         }
7764
7765                         advertising = (ADVERTISED_1000baseT_Full |
7766                                        ADVERTISED_TP);
7767                         break;
7768
7769                 case SPEED_2500:
7770                         if (cmd->duplex != DUPLEX_FULL) {
7771                                 DP(NETIF_MSG_LINK,
7772                                    "2.5G half not supported\n");
7773                                 return -EINVAL;
7774                         }
7775
7776                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7777                                 DP(NETIF_MSG_LINK,
7778                                    "2.5G full not supported\n");
7779                                 return -EINVAL;
7780                         }
7781
7782                         advertising = (ADVERTISED_2500baseX_Full |
7783                                        ADVERTISED_TP);
7784                         break;
7785
7786                 case SPEED_10000:
7787                         if (cmd->duplex != DUPLEX_FULL) {
7788                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7789                                 return -EINVAL;
7790                         }
7791
7792                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7793                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7794                                 return -EINVAL;
7795                         }
7796
7797                         advertising = (ADVERTISED_10000baseT_Full |
7798                                        ADVERTISED_FIBRE);
7799                         break;
7800
7801                 default:
7802                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7803                         return -EINVAL;
7804                 }
7805
7806                 bp->link_params.req_line_speed = cmd->speed;
7807                 bp->link_params.req_duplex = cmd->duplex;
7808                 bp->port.advertising = advertising;
7809         }
7810
7811         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7812            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7813            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7814            bp->port.advertising);
7815
7816         if (netif_running(dev)) {
7817                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7818                 bnx2x_link_set(bp);
7819         }
7820
7821         return 0;
7822 }
7823
7824 #define PHY_FW_VER_LEN                  10
7825
7826 static void bnx2x_get_drvinfo(struct net_device *dev,
7827                               struct ethtool_drvinfo *info)
7828 {
7829         struct bnx2x *bp = netdev_priv(dev);
7830         u8 phy_fw_ver[PHY_FW_VER_LEN];
7831
7832         strcpy(info->driver, DRV_MODULE_NAME);
7833         strcpy(info->version, DRV_MODULE_VERSION);
7834
7835         phy_fw_ver[0] = '\0';
7836         if (bp->port.pmf) {
7837                 bnx2x_acquire_phy_lock(bp);
7838                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7839                                              (bp->state != BNX2X_STATE_CLOSED),
7840                                              phy_fw_ver, PHY_FW_VER_LEN);
7841                 bnx2x_release_phy_lock(bp);
7842         }
7843
7844         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7845                  (bp->common.bc_ver & 0xff0000) >> 16,
7846                  (bp->common.bc_ver & 0xff00) >> 8,
7847                  (bp->common.bc_ver & 0xff),
7848                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7849         strcpy(info->bus_info, pci_name(bp->pdev));
7850         info->n_stats = BNX2X_NUM_STATS;
7851         info->testinfo_len = BNX2X_NUM_TESTS;
7852         info->eedump_len = bp->common.flash_size;
7853         info->regdump_len = 0;
7854 }
7855
7856 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7857 {
7858         struct bnx2x *bp = netdev_priv(dev);
7859
7860         if (bp->flags & NO_WOL_FLAG) {
7861                 wol->supported = 0;
7862                 wol->wolopts = 0;
7863         } else {
7864                 wol->supported = WAKE_MAGIC;
7865                 if (bp->wol)
7866                         wol->wolopts = WAKE_MAGIC;
7867                 else
7868                         wol->wolopts = 0;
7869         }
7870         memset(&wol->sopass, 0, sizeof(wol->sopass));
7871 }
7872
7873 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7874 {
7875         struct bnx2x *bp = netdev_priv(dev);
7876
7877         if (wol->wolopts & ~WAKE_MAGIC)
7878                 return -EINVAL;
7879
7880         if (wol->wolopts & WAKE_MAGIC) {
7881                 if (bp->flags & NO_WOL_FLAG)
7882                         return -EINVAL;
7883
7884                 bp->wol = 1;
7885         } else
7886                 bp->wol = 0;
7887
7888         return 0;
7889 }
7890
7891 static u32 bnx2x_get_msglevel(struct net_device *dev)
7892 {
7893         struct bnx2x *bp = netdev_priv(dev);
7894
7895         return bp->msglevel;
7896 }
7897
7898 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7899 {
7900         struct bnx2x *bp = netdev_priv(dev);
7901
7902         if (capable(CAP_NET_ADMIN))
7903                 bp->msglevel = level;
7904 }
7905
7906 static int bnx2x_nway_reset(struct net_device *dev)
7907 {
7908         struct bnx2x *bp = netdev_priv(dev);
7909
7910         if (!bp->port.pmf)
7911                 return 0;
7912
7913         if (netif_running(dev)) {
7914                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7915                 bnx2x_link_set(bp);
7916         }
7917
7918         return 0;
7919 }
7920
7921 static int bnx2x_get_eeprom_len(struct net_device *dev)
7922 {
7923         struct bnx2x *bp = netdev_priv(dev);
7924
7925         return bp->common.flash_size;
7926 }
7927
7928 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7929 {
7930         int port = BP_PORT(bp);
7931         int count, i;
7932         u32 val = 0;
7933
7934         /* adjust timeout for emulation/FPGA */
7935         count = NVRAM_TIMEOUT_COUNT;
7936         if (CHIP_REV_IS_SLOW(bp))
7937                 count *= 100;
7938
7939         /* request access to nvram interface */
7940         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7941                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7942
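             /* poll the arbitration grant bit, up to count*10 passes of
              * 5 us each
              */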
7943         for (i = 0; i < count*10; i++) {
7944                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7945                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7946                         break;
7947
7948                 udelay(5);
7949         }
7950
7951         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7952                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7953                 return -EBUSY;
7954         }
7955
7956         return 0;
7957 }
7958
7959 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7960 {
7961         int port = BP_PORT(bp);
7962         int count, i;
7963         u32 val = 0;
7964
7965         /* adjust timeout for emulation/FPGA */
7966         count = NVRAM_TIMEOUT_COUNT;
7967         if (CHIP_REV_IS_SLOW(bp))
7968                 count *= 100;
7969
7970         /* relinquish nvram interface */
7971         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7972                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7973
7974         for (i = 0; i < count*10; i++) {
7975                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7976                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7977                         break;
7978
7979                 udelay(5);
7980         }
7981
7982         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7983                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7984                 return -EBUSY;
7985         }
7986
7987         return 0;
7988 }
7989
7990 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7991 {
7992         u32 val;
7993
7994         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7995
7996         /* enable both bits, even on read */
7997         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7998                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7999                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8000 }
8001
8002 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8003 {
8004         u32 val;
8005
8006         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8007
8008         /* disable both bits, even after read */
8009         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8010                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8011                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8012 }
8013
8014 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8015                                   u32 cmd_flags)
8016 {
8017         int count, i, rc;
8018         u32 val;
8019
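             /* read protocol: clear the DONE bit, program the address,
              * issue DOIT, poll for DONE, then fetch the data register
              */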
8020         /* build the command word */
8021         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8022
8023         /* need to clear DONE bit separately */
8024         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8025
8026         /* address of the NVRAM to read from */
8027         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8028                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8029
8030         /* issue a read command */
8031         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8032
8033         /* adjust timeout for emulation/FPGA */
8034         count = NVRAM_TIMEOUT_COUNT;
8035         if (CHIP_REV_IS_SLOW(bp))
8036                 count *= 100;
8037
8038         /* wait for completion */
8039         *ret_val = 0;
8040         rc = -EBUSY;
8041         for (i = 0; i < count; i++) {
8042                 udelay(5);
8043                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8044
8045                 if (val & MCPR_NVM_COMMAND_DONE) {
8046                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8047                         /* we read nvram data in cpu order
8048                          * but ethtool sees it as an array of bytes;
8049                          * converting to big-endian does the job */
8050                         val = cpu_to_be32(val);
8051                         *ret_val = val;
8052                         rc = 0;
8053                         break;
8054                 }
8055         }
8056
8057         return rc;
8058 }
8059
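/* Read a dword-aligned buffer from NVRAM.  The first dword is issued
 * with the FIRST flag and the final dword with the LAST flag, so the
 * NVM controller sees the burst properly bracketed.
 */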
8060 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8061                             int buf_size)
8062 {
8063         int rc;
8064         u32 cmd_flags;
8065         u32 val;
8066
8067         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8068                 DP(BNX2X_MSG_NVM,
8069                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8070                    offset, buf_size);
8071                 return -EINVAL;
8072         }
8073
8074         if (offset + buf_size > bp->common.flash_size) {
8075                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8076                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8077                    offset, buf_size, bp->common.flash_size);
8078                 return -EINVAL;
8079         }
8080
8081         /* request access to nvram interface */
8082         rc = bnx2x_acquire_nvram_lock(bp);
8083         if (rc)
8084                 return rc;
8085
8086         /* enable access to nvram interface */
8087         bnx2x_enable_nvram_access(bp);
8088
8089         /* read the first word(s) */
8090         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8091         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8092                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8093                 memcpy(ret_buf, &val, 4);
8094
8095                 /* advance to the next dword */
8096                 offset += sizeof(u32);
8097                 ret_buf += sizeof(u32);
8098                 buf_size -= sizeof(u32);
8099                 cmd_flags = 0;
8100         }
8101
8102         if (rc == 0) {
8103                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8104                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8105                 memcpy(ret_buf, &val, 4);
8106         }
8107
8108         /* disable access to nvram interface */
8109         bnx2x_disable_nvram_access(bp);
8110         bnx2x_release_nvram_lock(bp);
8111
8112         return rc;
8113 }
8114
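/* ethtool -e entry point: offset and length were already validated
 * against get_eeprom_len() by the ethtool core, so the request is
 * simply forwarded to the NVRAM reader.
 */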
8115 static int bnx2x_get_eeprom(struct net_device *dev,
8116                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8117 {
8118         struct bnx2x *bp = netdev_priv(dev);
8119         int rc;
8120
8121         if (!netif_running(dev))
8122                 return -EAGAIN;
8123
8124         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8125            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8126            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8127            eeprom->len, eeprom->len);
8128
8129         /* parameters already validated in ethtool_get_eeprom */
8130
8131         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8132
8133         return rc;
8134 }
8135
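/* Write one dword to NVRAM: same handshake as the read path, except
 * that the data register is loaded up front and the command carries
 * the WR bit.
 */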
8136 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8137                                    u32 cmd_flags)
8138 {
8139         int count, i, rc;
8140
8141         /* build the command word */
8142         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8143
8144         /* need to clear DONE bit separately */
8145         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8146
8147         /* write the data */
8148         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8149
8150         /* address of the NVRAM to write to */
8151         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8152                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8153
8154         /* issue the write command */
8155         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8156
8157         /* adjust timeout for emulation/FPGA */
8158         count = NVRAM_TIMEOUT_COUNT;
8159         if (CHIP_REV_IS_SLOW(bp))
8160                 count *= 100;
8161
8162         /* wait for completion */
8163         rc = -EBUSY;
8164         for (i = 0; i < count; i++) {
8165                 udelay(5);
8166                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8167                 if (val & MCPR_NVM_COMMAND_DONE) {
8168                         rc = 0;
8169                         break;
8170                 }
8171         }
8172
8173         return rc;
8174 }
8175
8176 #define BYTE_OFFSET(offset)             (8 * ((offset) & 0x03))
8177
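/* Write a single byte by read-modify-write: fetch the surrounding
 * dword, patch the target byte (BYTE_OFFSET gives its bit position
 * within the dword) and write the dword back as one FIRST|LAST
 * transaction.
 */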
8178 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8179                               int buf_size)
8180 {
8181         int rc;
8182         u32 cmd_flags;
8183         u32 align_offset;
8184         u32 val;
8185
8186         if (offset + buf_size > bp->common.flash_size) {
8187                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8188                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8189                    offset, buf_size, bp->common.flash_size);
8190                 return -EINVAL;
8191         }
8192
8193         /* request access to nvram interface */
8194         rc = bnx2x_acquire_nvram_lock(bp);
8195         if (rc)
8196                 return rc;
8197
8198         /* enable access to nvram interface */
8199         bnx2x_enable_nvram_access(bp);
8200
8201         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8202         align_offset = (offset & ~0x03);
8203         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8204
8205         if (rc == 0) {
8206                 val &= ~(0xff << BYTE_OFFSET(offset));
8207                 val |= (*data_buf << BYTE_OFFSET(offset));
8208
8209                 /* nvram data is returned as an array of bytes;
8210                  * convert it back to cpu order */
8211                 val = be32_to_cpu(val);
8212
8213                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8214                                              cmd_flags);
8215         }
8216
8217         /* disable access to nvram interface */
8218         bnx2x_disable_nvram_access(bp);
8219         bnx2x_release_nvram_lock(bp);
8220
8221         return rc;
8222 }
8223
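/* Write a dword-aligned buffer to NVRAM.  Besides bracketing the
 * buffer itself, the FIRST/LAST flags are re-issued at every
 * NVRAM_PAGE_SIZE boundary so that no single burst crosses a flash
 * page.
 */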
8224 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8225                              int buf_size)
8226 {
8227         int rc;
8228         u32 cmd_flags;
8229         u32 val;
8230         u32 written_so_far;
8231
8232         if (buf_size == 1)      /* single byte write from ethtool */
8233                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8234
8235         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8236                 DP(BNX2X_MSG_NVM,
8237                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8238                    offset, buf_size);
8239                 return -EINVAL;
8240         }
8241
8242         if (offset + buf_size > bp->common.flash_size) {
8243                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8244                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8245                    offset, buf_size, bp->common.flash_size);
8246                 return -EINVAL;
8247         }
8248
8249         /* request access to nvram interface */
8250         rc = bnx2x_acquire_nvram_lock(bp);
8251         if (rc)
8252                 return rc;
8253
8254         /* enable access to nvram interface */
8255         bnx2x_enable_nvram_access(bp);
8256
8257         written_so_far = 0;
8258         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8259         while ((written_so_far < buf_size) && (rc == 0)) {
8260                 if (written_so_far == (buf_size - sizeof(u32)))
8261                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8262                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8263                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8264                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8265                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8266
8267                 memcpy(&val, data_buf, 4);
8268
8269                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8270
8271                 /* advance to the next dword */
8272                 offset += sizeof(u32);
8273                 data_buf += sizeof(u32);
8274                 written_so_far += sizeof(u32);
8275                 cmd_flags = 0;
8276         }
8277
8278         /* disable access to nvram interface */
8279         bnx2x_disable_nvram_access(bp);
8280         bnx2x_release_nvram_lock(bp);
8281
8282         return rc;
8283 }
8284
8285 static int bnx2x_set_eeprom(struct net_device *dev,
8286                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8287 {
8288         struct bnx2x *bp = netdev_priv(dev);
8289         int rc;
8290
8291         if (!netif_running(dev))
8292                 return -EAGAIN;
8293
8294         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8295            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8296            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8297            eeprom->len, eeprom->len);
8298
8299         /* parameters already validated in ethtool_set_eeprom */
8300
8301         /* the magic number 0x00504859 is "PHY" in ASCII: upgrade the PHY FW */
8302         if (eeprom->magic == 0x00504859)
8303                 if (bp->port.pmf) {
8304
8305                         bnx2x_acquire_phy_lock(bp);
8306                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8307                                              bp->link_params.ext_phy_config,
8308                                              (bp->state != BNX2X_STATE_CLOSED),
8309                                              eebuf, eeprom->len);
8310                         if ((bp->state == BNX2X_STATE_OPEN) ||
8311                             (bp->state == BNX2X_STATE_DISABLED)) {
8312                                 rc |= bnx2x_link_reset(&bp->link_params,
8313                                                        &bp->link_vars);
8314                                 rc |= bnx2x_phy_init(&bp->link_params,
8315                                                      &bp->link_vars);
8316                         }
8317                         bnx2x_release_phy_lock(bp);
8318
8319                 } else /* Only the PMF can access the PHY */
8320                         return -EINVAL;
8321         else
8322                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8323
8324         return rc;
8325 }
8326
8327 static int bnx2x_get_coalesce(struct net_device *dev,
8328                               struct ethtool_coalesce *coal)
8329 {
8330         struct bnx2x *bp = netdev_priv(dev);
8331
8332         memset(coal, 0, sizeof(struct ethtool_coalesce));
8333
8334         coal->rx_coalesce_usecs = bp->rx_ticks;
8335         coal->tx_coalesce_usecs = bp->tx_ticks;
8336
8337         return 0;
8338 }
8339
8340 static int bnx2x_set_coalesce(struct net_device *dev,
8341                               struct ethtool_coalesce *coal)
8342 {
8343         struct bnx2x *bp = netdev_priv(dev);
8344
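        /* XXX: the rx clamp below is decimal 3000 while the tx clamp is
         * hex 0x3000 (12288) - the asymmetry looks suspicious */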
8345         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8346         if (bp->rx_ticks > 3000)
8347                 bp->rx_ticks = 3000;
8348
8349         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8350         if (bp->tx_ticks > 0x3000)
8351                 bp->tx_ticks = 0x3000;
8352
8353         if (netif_running(dev))
8354                 bnx2x_update_coalesce(bp);
8355
8356         return 0;
8357 }
8358
8359 static void bnx2x_get_ringparam(struct net_device *dev,
8360                                 struct ethtool_ringparam *ering)
8361 {
8362         struct bnx2x *bp = netdev_priv(dev);
8363
8364         ering->rx_max_pending = MAX_RX_AVAIL;
8365         ering->rx_mini_max_pending = 0;
8366         ering->rx_jumbo_max_pending = 0;
8367
8368         ering->rx_pending = bp->rx_ring_size;
8369         ering->rx_mini_pending = 0;
8370         ering->rx_jumbo_pending = 0;
8371
8372         ering->tx_max_pending = MAX_TX_AVAIL;
8373         ering->tx_pending = bp->tx_ring_size;
8374 }
8375
8376 static int bnx2x_set_ringparam(struct net_device *dev,
8377                                struct ethtool_ringparam *ering)
8378 {
8379         struct bnx2x *bp = netdev_priv(dev);
8380         int rc = 0;
8381
8382         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8383             (ering->tx_pending > MAX_TX_AVAIL) ||
8384             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8385                 return -EINVAL;
8386
8387         bp->rx_ring_size = ering->rx_pending;
8388         bp->tx_ring_size = ering->tx_pending;
8389
8390         if (netif_running(dev)) {
8391                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8392                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8393         }
8394
8395         return rc;
8396 }
8397
8398 static void bnx2x_get_pauseparam(struct net_device *dev,
8399                                  struct ethtool_pauseparam *epause)
8400 {
8401         struct bnx2x *bp = netdev_priv(dev);
8402
8403         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8404                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8405
8406         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8407                             BNX2X_FLOW_CTRL_RX);
8408         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8409                             BNX2X_FLOW_CTRL_TX);
8410
8411         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8412            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8413            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8414 }
8415
8416 static int bnx2x_set_pauseparam(struct net_device *dev,
8417                                 struct ethtool_pauseparam *epause)
8418 {
8419         struct bnx2x *bp = netdev_priv(dev);
8420
8421         if (IS_E1HMF(bp))
8422                 return 0;
8423
8424         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8425            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8426            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8427
8428         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8429
8430         if (epause->rx_pause)
8431                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8432
8433         if (epause->tx_pause)
8434                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8435
8436         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8437                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8438
8439         if (epause->autoneg) {
8440                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8441                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8442                         return -EINVAL;
8443                 }
8444
8445                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8446                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8447         }
8448
8449         DP(NETIF_MSG_LINK,
8450            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8451
8452         if (netif_running(dev)) {
8453                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8454                 bnx2x_link_set(bp);
8455         }
8456
8457         return 0;
8458 }
8459
8460 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8461 {
8462         struct bnx2x *bp = netdev_priv(dev);
8463         int changed = 0;
8464         int rc = 0;
8465
8466         /* TPA requires Rx CSUM offloading */
8467         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8468                 if (!(dev->features & NETIF_F_LRO)) {
8469                         dev->features |= NETIF_F_LRO;
8470                         bp->flags |= TPA_ENABLE_FLAG;
8471                         changed = 1;
8472                 }
8473
8474         } else if (dev->features & NETIF_F_LRO) {
8475                 dev->features &= ~NETIF_F_LRO;
8476                 bp->flags &= ~TPA_ENABLE_FLAG;
8477                 changed = 1;
8478         }
8479
8480         if (changed && netif_running(dev)) {
8481                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8482                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8483         }
8484
8485         return rc;
8486 }
8487
8488 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8489 {
8490         struct bnx2x *bp = netdev_priv(dev);
8491
8492         return bp->rx_csum;
8493 }
8494
8495 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8496 {
8497         struct bnx2x *bp = netdev_priv(dev);
8498         int rc = 0;
8499
8500         bp->rx_csum = data;
8501
8502         /* Disable TPA when Rx CSUM is disabled. Otherwise all
8503            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8504         if (!data) {
8505                 u32 flags = ethtool_op_get_flags(dev);
8506
8507                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8508         }
8509
8510         return rc;
8511 }
8512
8513 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8514 {
8515         if (data) {
8516                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8517                 dev->features |= NETIF_F_TSO6;
8518         } else {
8519                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8520                 dev->features &= ~NETIF_F_TSO6;
8521         }
8522
8523         return 0;
8524 }
8525
8526 static const struct {
8527         char string[ETH_GSTRING_LEN];
8528 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8529         { "register_test (offline)" },
8530         { "memory_test (offline)" },
8531         { "loopback_test (offline)" },
8532         { "nvram_test (online)" },
8533         { "interrupt_test (online)" },
8534         { "link_test (online)" },
8535         { "idle check (online)" },
8536         { "MC errors (online)" }
8537 };
8538
8539 static int bnx2x_self_test_count(struct net_device *dev)
8540 {
8541         return BNX2X_NUM_TESTS;
8542 }
8543
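/* Offline register test: write 0x00000000 and then 0xffffffff to each
 * register in reg_tbl and compare the read-back under the given mask.
 * offset1 is the per-port stride, so port 1 tests its own copy of each
 * register.
 */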
8544 static int bnx2x_test_registers(struct bnx2x *bp)
8545 {
8546         int idx, i, rc = -ENODEV;
8547         u32 wr_val = 0;
8548         int port = BP_PORT(bp);
8549         static const struct {
8550                 u32  offset0;
8551                 u32  offset1;
8552                 u32  mask;
8553         } reg_tbl[] = {
8554 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8555                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8556                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8557                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8558                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8559                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8560                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8561                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8562                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8563                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8564 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8565                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8566                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8567                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8568                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8569                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8570                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8571                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8572                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8573                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8574 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8575                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8576                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8577                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8578                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8579                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8580                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8581                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8582                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8583                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8584 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8585                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8586                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8587                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8588                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8589                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8590                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8591                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8592
8593                 { 0xffffffff, 0, 0x00000000 }
8594         };
8595
8596         if (!netif_running(bp->dev))
8597                 return rc;
8598
8599         /* Run the test twice:
8600            first by writing 0x00000000, then by writing 0xffffffff */
8601         for (idx = 0; idx < 2; idx++) {
8602
8603                 switch (idx) {
8604                 case 0:
8605                         wr_val = 0;
8606                         break;
8607                 case 1:
8608                         wr_val = 0xffffffff;
8609                         break;
8610                 }
8611
8612                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8613                         u32 offset, mask, save_val, val;
8614
8615                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8616                         mask = reg_tbl[i].mask;
8617
8618                         save_val = REG_RD(bp, offset);
8619
8620                         REG_WR(bp, offset, wr_val);
8621                         val = REG_RD(bp, offset);
8622
8623                         /* Restore the original register's value */
8624                         REG_WR(bp, offset, save_val);
8625
8626                         /* verify the value matches the expected one */
8627                         if ((val & mask) != (wr_val & mask))
8628                                 goto test_reg_exit;
8629                 }
8630         }
8631
8632         rc = 0;
8633
8634 test_reg_exit:
8635         return rc;
8636 }
8637
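/* Offline memory test: read back every word of the listed memories,
 * then check the parity status registers, ignoring the bits that the
 * E1/E1H masks declare as expected.
 */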
8638 static int bnx2x_test_memory(struct bnx2x *bp)
8639 {
8640         int i, j, rc = -ENODEV;
8641         u32 val;
8642         static const struct {
8643                 u32 offset;
8644                 int size;
8645         } mem_tbl[] = {
8646                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8647                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8648                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8649                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8650                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8651                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8652                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8653
8654                 { 0xffffffff, 0 }
8655         };
8656         static const struct {
8657                 char *name;
8658                 u32 offset;
8659                 u32 e1_mask;
8660                 u32 e1h_mask;
8661         } prty_tbl[] = {
8662                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8663                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8664                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8665                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8666                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8667                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8668
8669                 { NULL, 0xffffffff, 0, 0 }
8670         };
8671
8672         if (!netif_running(bp->dev))
8673                 return rc;
8674
8675         /* Go through all the memories */
8676         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8677                 for (j = 0; j < mem_tbl[i].size; j++)
8678                         REG_RD(bp, mem_tbl[i].offset + j*4);
8679
8680         /* Check the parity status */
8681         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8682                 val = REG_RD(bp, prty_tbl[i].offset);
8683                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8684                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8685                         DP(NETIF_MSG_HW,
8686                            "%s is 0x%x\n", prty_tbl[i].name, val);
8687                         goto test_mem_exit;
8688                 }
8689         }
8690
8691         rc = 0;
8692
8693 test_mem_exit:
8694         return rc;
8695 }
8696
8697 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8698 {
8699         int cnt = 1000;
8700
8701         if (link_up)
8702                 while (bnx2x_link_test(bp) && cnt--)
8703                         msleep(10);
8704 }
8705
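/* Send one self-addressed 1514-byte frame through BMAC or XGXS (PHY)
 * loopback and verify that exactly one packet comes back with the
 * expected length and payload pattern.
 */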
8706 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8707 {
8708         unsigned int pkt_size, num_pkts, i;
8709         struct sk_buff *skb;
8710         unsigned char *packet;
8711         struct bnx2x_fastpath *fp = &bp->fp[0];
8712         u16 tx_start_idx, tx_idx;
8713         u16 rx_start_idx, rx_idx;
8714         u16 pkt_prod;
8715         struct sw_tx_bd *tx_buf;
8716         struct eth_tx_bd *tx_bd;
8717         dma_addr_t mapping;
8718         union eth_rx_cqe *cqe;
8719         u8 cqe_fp_flags;
8720         struct sw_rx_bd *rx_buf;
8721         u16 len;
8722         int rc = -ENODEV;
8723
8724         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8725                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8726                 bnx2x_acquire_phy_lock(bp);
8727                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8728                 bnx2x_release_phy_lock(bp);
8729
8730         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8731                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8732                 bnx2x_acquire_phy_lock(bp);
8733                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8734                 bnx2x_release_phy_lock(bp);
8735                 /* wait until link state is restored */
8736                 bnx2x_wait_for_link(bp, link_up);
8737
8738         } else
8739                 return -EINVAL;
8740
8741         pkt_size = 1514;
8742         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8743         if (!skb) {
8744                 rc = -ENOMEM;
8745                 goto test_loopback_exit;
8746         }
8747         packet = skb_put(skb, pkt_size);
8748         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8749         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8750         for (i = ETH_HLEN; i < pkt_size; i++)
8751                 packet[i] = (unsigned char) (i & 0xff);
8752
8753         num_pkts = 0;
8754         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8755         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8756
8757         pkt_prod = fp->tx_pkt_prod++;
8758         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8759         tx_buf->first_bd = fp->tx_bd_prod;
8760         tx_buf->skb = skb;
8761
8762         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8763         mapping = pci_map_single(bp->pdev, skb->data,
8764                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8765         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8766         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8767         tx_bd->nbd = cpu_to_le16(1);
8768         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8769         tx_bd->vlan = cpu_to_le16(pkt_prod);
8770         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8771                                        ETH_TX_BD_FLAGS_END_BD);
8772         tx_bd->general_data = ((UNICAST_ADDRESS <<
8773                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8774
8775         wmb();
8776
8777         fp->hw_tx_prods->bds_prod =
8778                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8779         mb(); /* FW restriction: must not reorder writing nbd and packets */
8780         fp->hw_tx_prods->packets_prod =
8781                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8782         DOORBELL(bp, FP_IDX(fp), 0);
8783
8784         mmiowb();
8785
8786         num_pkts++;
8787         fp->tx_bd_prod++;
8788         bp->dev->trans_start = jiffies;
8789
8790         udelay(100);
8791
8792         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8793         if (tx_idx != tx_start_idx + num_pkts)
8794                 goto test_loopback_exit;
8795
8796         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8797         if (rx_idx != rx_start_idx + num_pkts)
8798                 goto test_loopback_exit;
8799
8800         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8801         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8802         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8803                 goto test_loopback_rx_exit;
8804
8805         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8806         if (len != pkt_size)
8807                 goto test_loopback_rx_exit;
8808
8809         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8810         skb = rx_buf->skb;
8811         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8812         for (i = ETH_HLEN; i < pkt_size; i++)
8813                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8814                         goto test_loopback_rx_exit;
8815
8816         rc = 0;
8817
8818 test_loopback_rx_exit:
8819
8820         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8821         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8822         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8823         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8824
8825         /* Update producers */
8826         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8827                              fp->rx_sge_prod);
8828
8829 test_loopback_exit:
8830         bp->link_params.loopback_mode = LOOPBACK_NONE;
8831
8832         return rc;
8833 }
8834
8835 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8836 {
8837         int rc = 0;
8838
8839         if (!netif_running(bp->dev))
8840                 return BNX2X_LOOPBACK_FAILED;
8841
8842         bnx2x_netif_stop(bp, 1);
8843
8844         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8845                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8846                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8847         }
8848
8849         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8850                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8851                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8852         }
8853
8854         bnx2x_netif_start(bp);
8855
8856         return rc;
8857 }
8858
8859 #define CRC32_RESIDUAL                  0xdebb20e3
8860
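/* Each region in nvram_tbl below is stored together with its CRC32, so
 * running ether_crc_le() over the whole region (data plus CRC) must
 * yield the well-known CRC32 residual 0xdebb20e3.
 */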
8861 static int bnx2x_test_nvram(struct bnx2x *bp)
8862 {
8863         static const struct {
8864                 int offset;
8865                 int size;
8866         } nvram_tbl[] = {
8867                 {     0,  0x14 }, /* bootstrap */
8868                 {  0x14,  0xec }, /* dir */
8869                 { 0x100, 0x350 }, /* manuf_info */
8870                 { 0x450,  0xf0 }, /* feature_info */
8871                 { 0x640,  0x64 }, /* upgrade_key_info */
8872                 { 0x6a4,  0x64 },
8873                 { 0x708,  0x70 }, /* manuf_key_info */
8874                 { 0x778,  0x70 },
8875                 {     0,     0 }
8876         };
8877         u32 buf[0x350 / 4];
8878         u8 *data = (u8 *)buf;
8879         int i, rc;
8880         u32 magic, csum;
8881
8882         rc = bnx2x_nvram_read(bp, 0, data, 4);
8883         if (rc) {
8884                 DP(NETIF_MSG_PROBE, "magic value read failed (rc %d)\n", rc);
8885                 goto test_nvram_exit;
8886         }
8887
8888         magic = be32_to_cpu(buf[0]);
8889         if (magic != 0x669955aa) {
8890                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8891                 rc = -ENODEV;
8892                 goto test_nvram_exit;
8893         }
8894
8895         for (i = 0; nvram_tbl[i].size; i++) {
8896
8897                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8898                                       nvram_tbl[i].size);
8899                 if (rc) {
8900                         DP(NETIF_MSG_PROBE,
8901                            "nvram_tbl[%d] read failed (rc %d)\n", i, rc);
8902                         goto test_nvram_exit;
8903                 }
8904
8905                 csum = ether_crc_le(nvram_tbl[i].size, data);
8906                 if (csum != CRC32_RESIDUAL) {
8907                         DP(NETIF_MSG_PROBE,
8908                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8909                         rc = -ENODEV;
8910                         goto test_nvram_exit;
8911                 }
8912         }
8913
8914 test_nvram_exit:
8915         return rc;
8916 }
8917
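/* Online interrupt test: post a harmless set-MAC ramrod on the
 * slowpath and wait up to roughly 100ms for its completion, which
 * demonstrates that the interrupt path is alive.
 */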
8918 static int bnx2x_test_intr(struct bnx2x *bp)
8919 {
8920         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8921         int i, rc;
8922
8923         if (!netif_running(bp->dev))
8924                 return -ENODEV;
8925
8926         config->hdr.length_6b = 0;
8927         if (CHIP_IS_E1(bp))
8928                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8929         else
8930                 config->hdr.offset = BP_FUNC(bp);
8931         config->hdr.client_id = BP_CL_ID(bp);
8932         config->hdr.reserved1 = 0;
8933
8934         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8935                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8936                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8937         if (rc == 0) {
8938                 bp->set_mac_pending++;
8939                 for (i = 0; i < 10; i++) {
8940                         if (!bp->set_mac_pending)
8941                                 break;
8942                         msleep_interruptible(10);
8943                 }
8944                 if (i == 10)
8945                         rc = -ENODEV;
8946         }
8947
8948         return rc;
8949 }
8950
8951 static void bnx2x_self_test(struct net_device *dev,
8952                             struct ethtool_test *etest, u64 *buf)
8953 {
8954         struct bnx2x *bp = netdev_priv(dev);
8955
8956         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8957
8958         if (!netif_running(dev))
8959                 return;
8960
8961         /* offline tests are not supported in MF mode */
8962         if (IS_E1HMF(bp))
8963                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8964
8965         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8966                 u8 link_up;
8967
8968                 link_up = bp->link_vars.link_up;
8969                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8970                 bnx2x_nic_load(bp, LOAD_DIAG);
8971                 /* wait until link state is restored */
8972                 bnx2x_wait_for_link(bp, link_up);
8973
8974                 if (bnx2x_test_registers(bp) != 0) {
8975                         buf[0] = 1;
8976                         etest->flags |= ETH_TEST_FL_FAILED;
8977                 }
8978                 if (bnx2x_test_memory(bp) != 0) {
8979                         buf[1] = 1;
8980                         etest->flags |= ETH_TEST_FL_FAILED;
8981                 }
8982                 buf[2] = bnx2x_test_loopback(bp, link_up);
8983                 if (buf[2] != 0)
8984                         etest->flags |= ETH_TEST_FL_FAILED;
8985
8986                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8987                 bnx2x_nic_load(bp, LOAD_NORMAL);
8988                 /* wait until link state is restored */
8989                 bnx2x_wait_for_link(bp, link_up);
8990         }
8991         if (bnx2x_test_nvram(bp) != 0) {
8992                 buf[3] = 1;
8993                 etest->flags |= ETH_TEST_FL_FAILED;
8994         }
8995         if (bnx2x_test_intr(bp) != 0) {
8996                 buf[4] = 1;
8997                 etest->flags |= ETH_TEST_FL_FAILED;
8998         }
8999         if (bp->port.pmf)
9000                 if (bnx2x_link_test(bp) != 0) {
9001                         buf[5] = 1;
9002                         etest->flags |= ETH_TEST_FL_FAILED;
9003                 }
9004         buf[7] = bnx2x_mc_assert(bp);
9005         if (buf[7] != 0)
9006                 etest->flags |= ETH_TEST_FL_FAILED;
9007
9008 #ifdef BNX2X_EXTRA_DEBUG
9009         bnx2x_panic_dump(bp);
9010 #endif
9011 }
9012
9013 static const struct {
9014         long offset;
9015         int size;
9016         u32 flags;
9017 #define STATS_FLAGS_PORT                1
9018 #define STATS_FLAGS_FUNC                2
9019         u8 string[ETH_GSTRING_LEN];
9020 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9021 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9022                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
9023         { STATS_OFFSET32(error_bytes_received_hi),
9024                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9025         { STATS_OFFSET32(total_bytes_transmitted_hi),
9026                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
9027         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9028                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9029         { STATS_OFFSET32(total_unicast_packets_received_hi),
9030                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9031         { STATS_OFFSET32(total_multicast_packets_received_hi),
9032                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9033         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9034                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9035         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9036                                 8, STATS_FLAGS_FUNC, "tx_packets" },
9037         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9038                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9039 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9040                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9041         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9042                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9043         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9044                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9045         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9046                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9047         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9048                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9049         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9050                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9051         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9052                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9053         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9054                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9055         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9056                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9057         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9058                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9059 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9060                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9061         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9062                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9063         { STATS_OFFSET32(jabber_packets_received),
9064                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9065         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9066                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9067         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9068                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9069         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9070                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9071         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9072                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9073         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9074                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9075         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9076                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9077         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9078                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9079 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9080                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9081         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9082                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9083         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9084                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9085         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9086                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9087         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9088                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9089         { STATS_OFFSET32(mac_filter_discard),
9090                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9091         { STATS_OFFSET32(no_buff_discard),
9092                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9093         { STATS_OFFSET32(xxoverflow_discard),
9094                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9095         { STATS_OFFSET32(brb_drop_hi),
9096                                 8, STATS_FLAGS_PORT, "brb_discard" },
9097         { STATS_OFFSET32(brb_truncate_hi),
9098                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9099 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9100                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9101         { STATS_OFFSET32(rx_skb_alloc_failed),
9102                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9103 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9104                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9105 };
9106
9107 #define IS_NOT_E1HMF_STAT(bp, i) \
9108                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9109
9110 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9111 {
9112         struct bnx2x *bp = netdev_priv(dev);
9113         int i, j;
9114
9115         switch (stringset) {
9116         case ETH_SS_STATS:
9117                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9118                         if (IS_NOT_E1HMF_STAT(bp, i))
9119                                 continue;
9120                         strcpy(buf + j*ETH_GSTRING_LEN,
9121                                bnx2x_stats_arr[i].string);
9122                         j++;
9123                 }
9124                 break;
9125
9126         case ETH_SS_TEST:
9127                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9128                 break;
9129         }
9130 }
9131
9132 static int bnx2x_get_stats_count(struct net_device *dev)
9133 {
9134         struct bnx2x *bp = netdev_priv(dev);
9135         int i, num_stats = 0;
9136
9137         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9138                 if (IS_NOT_E1HMF_STAT(bp, i))
9139                         continue;
9140                 num_stats++;
9141         }
9142         return num_stats;
9143 }
9144
9145 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9146                                     struct ethtool_stats *stats, u64 *buf)
9147 {
9148         struct bnx2x *bp = netdev_priv(dev);
9149         u32 *hw_stats = (u32 *)&bp->eth_stats;
9150         int i, j;
9151
9152         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9153                 if (IS_NOT_E1HMF_STAT(bp, i))
9154                         continue;
9155
9156                 if (bnx2x_stats_arr[i].size == 0) {
9157                         /* skip this counter */
9158                         buf[j] = 0;
9159                         j++;
9160                         continue;
9161                 }
9162                 if (bnx2x_stats_arr[i].size == 4) {
9163                         /* 4-byte counter */
9164                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9165                         j++;
9166                         continue;
9167                 }
9168                 /* 8-byte counter */
9169                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9170                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9171                 j++;
9172         }
9173 }
9174
9175 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9176 {
9177         struct bnx2x *bp = netdev_priv(dev);
9178         int port = BP_PORT(bp);
9179         int i;
9180
9181         if (!netif_running(dev))
9182                 return 0;
9183
9184         if (!bp->port.pmf)
9185                 return 0;
9186
9187         if (data == 0)
9188                 data = 2;
9189
9190         for (i = 0; i < (data * 2); i++) {
9191                 if ((i % 2) == 0)
9192                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9193                                       bp->link_params.hw_led_mode,
9194                                       bp->link_params.chip_id);
9195                 else
9196                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9197                                       bp->link_params.hw_led_mode,
9198                                       bp->link_params.chip_id);
9199
9200                 msleep_interruptible(500);
9201                 if (signal_pending(current))
9202                         break;
9203         }
9204
9205         if (bp->link_vars.link_up)
9206                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9207                               bp->link_vars.line_speed,
9208                               bp->link_params.hw_led_mode,
9209                               bp->link_params.chip_id);
9210
9211         return 0;
9212 }
9213
9214 static struct ethtool_ops bnx2x_ethtool_ops = {
9215         .get_settings           = bnx2x_get_settings,
9216         .set_settings           = bnx2x_set_settings,
9217         .get_drvinfo            = bnx2x_get_drvinfo,
9218         .get_wol                = bnx2x_get_wol,
9219         .set_wol                = bnx2x_set_wol,
9220         .get_msglevel           = bnx2x_get_msglevel,
9221         .set_msglevel           = bnx2x_set_msglevel,
9222         .nway_reset             = bnx2x_nway_reset,
9223         .get_link               = ethtool_op_get_link,
9224         .get_eeprom_len         = bnx2x_get_eeprom_len,
9225         .get_eeprom             = bnx2x_get_eeprom,
9226         .set_eeprom             = bnx2x_set_eeprom,
9227         .get_coalesce           = bnx2x_get_coalesce,
9228         .set_coalesce           = bnx2x_set_coalesce,
9229         .get_ringparam          = bnx2x_get_ringparam,
9230         .set_ringparam          = bnx2x_set_ringparam,
9231         .get_pauseparam         = bnx2x_get_pauseparam,
9232         .set_pauseparam         = bnx2x_set_pauseparam,
9233         .get_rx_csum            = bnx2x_get_rx_csum,
9234         .set_rx_csum            = bnx2x_set_rx_csum,
9235         .get_tx_csum            = ethtool_op_get_tx_csum,
9236         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9237         .set_flags              = bnx2x_set_flags,
9238         .get_flags              = ethtool_op_get_flags,
9239         .get_sg                 = ethtool_op_get_sg,
9240         .set_sg                 = ethtool_op_set_sg,
9241         .get_tso                = ethtool_op_get_tso,
9242         .set_tso                = bnx2x_set_tso,
9243         .self_test_count        = bnx2x_self_test_count,
9244         .self_test              = bnx2x_self_test,
9245         .get_strings            = bnx2x_get_strings,
9246         .phys_id                = bnx2x_phys_id,
9247         .get_stats_count        = bnx2x_get_stats_count,
9248         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9249 };
9250
9251 /* end of ethtool_ops */
9252
9253 /****************************************************************************
9254 * General service functions
9255 ****************************************************************************/
9256
9257 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9258 {
9259         u16 pmcsr;
9260
9261         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9262
9263         switch (state) {
9264         case PCI_D0:
9265                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9266                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9267                                        PCI_PM_CTRL_PME_STATUS));
9268
9269                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9270                         /* delay required during transition out of D3hot */
9271                         msleep(20);
9272                 break;
9273
9274         case PCI_D3hot:
9275                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9276                 pmcsr |= 3;
9277
9278                 if (bp->wol)
9279                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9280
9281                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9282                                       pmcsr);
9283
9284                 /* No more memory access after this point until
9285                  * the device is brought back to D0.
9286                  */
9287                 break;
9288
9289         default:
9290                 return -EINVAL;
9291         }
9292         return 0;
9293 }
9294
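/* The last entry of each RCQ page holds the next-page pointer rather
 * than a completion, so the consumer value taken from the status block
 * is bumped past it before being compared.
 */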
9295 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9296 {
9297         u16 rx_cons_sb;
9298
9299         /* Tell compiler that status block fields can change */
9300         barrier();
9301         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9302         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9303                 rx_cons_sb++;
9304         return (fp->rx_comp_cons != rx_cons_sb);
9305 }
9306
9307 /*
9308  * net_device service functions
9309  */
9310
9311 static int bnx2x_poll(struct napi_struct *napi, int budget)
9312 {
9313         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9314                                                  napi);
9315         struct bnx2x *bp = fp->bp;
9316         int work_done = 0;
9317
9318 #ifdef BNX2X_STOP_ON_ERROR
9319         if (unlikely(bp->panic))
9320                 goto poll_panic;
9321 #endif
9322
9323         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9324         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9325         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9326
9327         bnx2x_update_fpsb_idx(fp);
9328
9329         if (bnx2x_has_tx_work(fp))
9330                 bnx2x_tx_int(fp, budget);
9331
9332         if (bnx2x_has_rx_work(fp))
9333                 work_done = bnx2x_rx_int(fp, budget);
9334         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9335
9336         /* must not complete if we consumed full budget */
9337         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9338
9339 #ifdef BNX2X_STOP_ON_ERROR
9340 poll_panic:
9341 #endif
9342                 netif_rx_complete(napi);
9343
9344                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9345                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9346                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9347                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9348         }
9349         return work_done;
9350 }
9351
9352
9353 /* We split the first BD into a headers BD and a data BD
9354  * to ease the pain of our fellow microcode engineers;
9355  * we use one mapping for both BDs.
9356  * So far this has only been observed to happen
9357  * in Other Operating Systems(TM).
9358  */
9359 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9360                                    struct bnx2x_fastpath *fp,
9361                                    struct eth_tx_bd **tx_bd, u16 hlen,
9362                                    u16 bd_prod, int nbd)
9363 {
9364         struct eth_tx_bd *h_tx_bd = *tx_bd;
9365         struct eth_tx_bd *d_tx_bd;
9366         dma_addr_t mapping;
9367         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9368
9369         /* first fix first BD */
9370         h_tx_bd->nbd = cpu_to_le16(nbd);
9371         h_tx_bd->nbytes = cpu_to_le16(hlen);
9372
9373         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9374            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9375            h_tx_bd->addr_lo, h_tx_bd->nbd);
9376
9377         /* now get a new data BD
9378          * (after the pbd) and fill it */
9379         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9380         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9381
9382         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9383                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9384
9385         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9386         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9387         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9388         d_tx_bd->vlan = 0;
9389         /* this marks the BD as one that has no individual mapping
9390          * the FW ignores this flag in a BD not marked start
9391          */
9392         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9393         DP(NETIF_MSG_TX_QUEUED,
9394            "TSO split data size is %d (%x:%x)\n",
9395            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9396
9397         /* update tx_bd for marking the last BD flag */
9398         *tx_bd = d_tx_bd;
9399
9400         return bd_prod;
9401 }
9402
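/* Adjust a partial checksum by the 'fix' bytes just before the
 * transport header: fold them out (fix > 0) or back in (fix < 0), then
 * byte-swap the result for the parsing BD.  Presumably this
 * compensates for where the hardware starts its own checksum.
 */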
9403 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9404 {
9405         if (fix > 0)
9406                 csum = (u16) ~csum_fold(csum_sub(csum,
9407                                 csum_partial(t_header - fix, fix, 0)));
9408
9409         else if (fix < 0)
9410                 csum = (u16) ~csum_fold(csum_add(csum,
9411                                 csum_partial(t_header, -fix, 0)));
9412
9413         return swab16(csum);
9414 }
9415
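/* Classify an outgoing skb into the XMIT_* flags used by the xmit
 * path: plain, IPv4/IPv6 checksum offload (with the TCP bit when the
 * L4 protocol is TCP) and GSO v4/v6.
 */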
9416 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9417 {
9418         u32 rc;
9419
9420         if (skb->ip_summed != CHECKSUM_PARTIAL)
9421                 rc = XMIT_PLAIN;
9422
9423         else {
9424                 if (skb->protocol == htons(ETH_P_IPV6)) {
9425                         rc = XMIT_CSUM_V6;
9426                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9427                                 rc |= XMIT_CSUM_TCP;
9428
9429                 } else {
9430                         rc = XMIT_CSUM_V4;
9431                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9432                                 rc |= XMIT_CSUM_TCP;
9433                 }
9434         }
9435
9436         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9437                 rc |= XMIT_GSO_V4;
9438
9439         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9440                 rc |= XMIT_GSO_V6;
9441
9442         return rc;
9443 }
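
/* Example: a CHECKSUM_PARTIAL IPv4 TCP skb with gso_type SKB_GSO_TCPV4
 * yields xmit_type = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4; an skb
 * with no checksum offload requested yields just XMIT_PLAIN.
 */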
9444
9445 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9446 /* check if packet requires linearization (packet is too fragmented) */
9447 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9448                              u32 xmit_type)
9449 {
9450         int to_copy = 0;
9451         int hlen = 0;
9452         int first_bd_sz = 0;
9453
9454         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9455         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9456
9457                 if (xmit_type & XMIT_GSO) {
9458                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9459                         /* Check if LSO packet needs to be copied:
9460                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9461                         int wnd_size = MAX_FETCH_BD - 3;
9462                         /* Number of windows to check */
9463                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9464                         int wnd_idx = 0;
9465                         int frag_idx = 0;
9466                         u32 wnd_sum = 0;
9467
9468                         /* Headers length */
9469                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9470                                 tcp_hdrlen(skb);
9471
9472                         /* Amount of data (w/o headers) on the linear part of the SKB */
9473                         first_bd_sz = skb_headlen(skb) - hlen;
9474
9475                         wnd_sum  = first_bd_sz;
9476
9477                         /* Calculate the first sum - it's special */
9478                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9479                                 wnd_sum +=
9480                                         skb_shinfo(skb)->frags[frag_idx].size;
9481
9482                         /* If there was data on linear skb data - check it */
9483                         if (first_bd_sz > 0) {
9484                                 if (unlikely(wnd_sum < lso_mss)) {
9485                                         to_copy = 1;
9486                                         goto exit_lbl;
9487                                 }
9488
9489                                 wnd_sum -= first_bd_sz;
9490                         }
9491
9492                         /* Others are easier: run through the frag list and
9493                            check all windows */
9494                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9495                                 wnd_sum +=
9496                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9497
9498                                 if (unlikely(wnd_sum < lso_mss)) {
9499                                         to_copy = 1;
9500                                         break;
9501                                 }
9502                                 wnd_sum -=
9503                                         skb_shinfo(skb)->frags[wnd_idx].size;
9504                         }
9505
9506                 } else {
9507                         /* a non-LSO packet that is too fragmented
9508                            must always be linearized */
9509                         to_copy = 1;
9510                 }
9511         }
9512
9513 exit_lbl:
9514         if (unlikely(to_copy))
9515                 DP(NETIF_MSG_TX_QUEUED,
9516                    "Linearization IS REQUIRED for %s packet. "
9517                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9518                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9519                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9520
9521         return to_copy;
9522 }
9523 #endif
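
/* Worked example (frag sizes and a MAX_FETCH_BD of 13 assumed for
 * illustration): the window size is then 10.  For an LSO skb with
 * gso_size 1460 whose post-header linear remainder and frags are all
 * 100 bytes, every 10-BD window sums to 1000 < 1460, so a single MSS
 * could span more than 10 BDs and the skb must be linearized; with
 * 200-byte frags (window sum 2000) every window passes.
 */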
9524
9525 /* called with netif_tx_lock
9526  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9527  * netif_wake_queue()
9528  */
9529 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9530 {
9531         struct bnx2x *bp = netdev_priv(dev);
9532         struct bnx2x_fastpath *fp;
9533         struct sw_tx_bd *tx_buf;
9534         struct eth_tx_bd *tx_bd;
9535         struct eth_tx_parse_bd *pbd = NULL;
9536         u16 pkt_prod, bd_prod;
9537         int nbd, fp_index;
9538         dma_addr_t mapping;
9539         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9540         int vlan_off = (bp->e1hov ? 4 : 0);
9541         int i;
9542         u8 hlen = 0;
9543
9544 #ifdef BNX2X_STOP_ON_ERROR
9545         if (unlikely(bp->panic))
9546                 return NETDEV_TX_BUSY;
9547 #endif
9548
9549         fp_index = (smp_processor_id() % bp->num_queues);
9550         fp = &bp->fp[fp_index];
9551
9552         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9553                 bp->eth_stats.driver_xoff++;
9554                 netif_stop_queue(dev);
9555                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9556                 return NETDEV_TX_BUSY;
9557         }
9558
9559         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9560            "  gso type %x  xmit_type %x\n",
9561            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9562            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9563
9564 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9565         /* First, check if we need to linearize the skb
9566            (due to FW restrictions) */
9567         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9568                 /* Statistics of linearization */
9569                 bp->lin_cnt++;
9570                 if (skb_linearize(skb) != 0) {
9571                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9572                            "silently dropping this SKB\n");
9573                         dev_kfree_skb_any(skb);
9574                         return NETDEV_TX_OK;
9575                 }
9576         }
9577 #endif
9578
9579         /*
9580          * Please read carefully. First we use one BD which we mark as start,
9581          * then for TSO or checksum offload we have a parsing info BD,
9582          * and only then we have the rest of the TSO BDs.
9583          * (don't forget to mark the last one as last,
9584          * and to unmap only AFTER you write to the BD ...)
9585          * And above all, all PBD sizes are in words - NOT DWORDS!
9586          */
9587
9588         pkt_prod = fp->tx_pkt_prod++;
9589         bd_prod = TX_BD(fp->tx_bd_prod);
9590
9591         /* get a tx_buf and first BD */
9592         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9593         tx_bd = &fp->tx_desc_ring[bd_prod];
9594
9595         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9596         tx_bd->general_data = (UNICAST_ADDRESS <<
9597                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9598         /* header nbd */
9599         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9600
9601         /* remember the first BD of the packet */
9602         tx_buf->first_bd = fp->tx_bd_prod;
9603         tx_buf->skb = skb;
9604
9605         DP(NETIF_MSG_TX_QUEUED,
9606            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9607            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9608
9609 #ifdef BCM_VLAN
9610         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9611             (bp->flags & HW_VLAN_TX_FLAG)) {
9612                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9613                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9614                 vlan_off += 4;
9615         } else
9616 #endif
9617                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9618
9619         if (xmit_type) {
9620                 /* turn on parsing and get a BD */
9621                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9622                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9623
9624                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9625         }
9626
9627         if (xmit_type & XMIT_CSUM) {
9628                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9629
9630                 /* for now NS flag is not used in Linux */
9631                 pbd->global_data = (hlen |
9632                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9633                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9634
9635                 pbd->ip_hlen = (skb_transport_header(skb) -
9636                                 skb_network_header(skb)) / 2;
9637
9638                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9639
9640                 pbd->total_hlen = cpu_to_le16(hlen);
9641                 hlen = hlen*2 - vlan_off;
9642
9643                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9644
9645                 if (xmit_type & XMIT_CSUM_V4)
9646                         tx_bd->bd_flags.as_bitfield |=
9647                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9648                 else
9649                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9650
9651                 if (xmit_type & XMIT_CSUM_TCP) {
9652                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9653
9654                 } else {
9655                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9656
9657                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9658                         pbd->cs_offset = fix / 2;
9659
9660                         DP(NETIF_MSG_TX_QUEUED,
9661                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9662                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9663                            SKB_CS(skb));
9664
9665                         /* HW bug: fixup the CSUM */
9666                         pbd->tcp_pseudo_csum =
9667                                 bnx2x_csum_fix(skb_transport_header(skb),
9668                                                SKB_CS(skb), fix);
9669
9670                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9671                            pbd->tcp_pseudo_csum);
9672                 }
9673         }
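
        /* Worked example of the word-sized header fields (untagged
         * IPv4/TCP, vlan_off = 0 assumed): MAC 14 bytes = 7 words,
         * IP 20 bytes = 10 words, TCP 20 bytes = 10 words, so
         * total_hlen = 27 words; the "hlen = hlen*2 - vlan_off" line
         * above converts that back to 54 bytes for the TSO split.
         */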
9674
9675         mapping = pci_map_single(bp->pdev, skb->data,
9676                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9677
9678         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9679         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9680         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9681         tx_bd->nbd = cpu_to_le16(nbd);
9682         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9683
9684         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9685            "  nbytes %d  flags %x  vlan %x\n",
9686            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9687            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9688            le16_to_cpu(tx_bd->vlan));
9689
9690         if (xmit_type & XMIT_GSO) {
9691
9692                 DP(NETIF_MSG_TX_QUEUED,
9693                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9694                    skb->len, hlen, skb_headlen(skb),
9695                    skb_shinfo(skb)->gso_size);
9696
9697                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9698
9699                 if (unlikely(skb_headlen(skb) > hlen))
9700                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9701                                                  bd_prod, ++nbd);
9702
9703                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9704                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9705                 pbd->tcp_flags = pbd_tcp_flags(skb);
9706
9707                 if (xmit_type & XMIT_GSO_V4) {
9708                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9709                         pbd->tcp_pseudo_csum =
9710                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9711                                                           ip_hdr(skb)->daddr,
9712                                                           0, IPPROTO_TCP, 0));
9713
9714                 } else
9715                         pbd->tcp_pseudo_csum =
9716                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9717                                                         &ipv6_hdr(skb)->daddr,
9718                                                         0, IPPROTO_TCP, 0));
9719
9720                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9721         }
9722
9723         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9724                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9725
9726                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9727                 tx_bd = &fp->tx_desc_ring[bd_prod];
9728
9729                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9730                                        frag->size, PCI_DMA_TODEVICE);
9731
9732                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9733                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9734                 tx_bd->nbytes = cpu_to_le16(frag->size);
9735                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9736                 tx_bd->bd_flags.as_bitfield = 0;
9737
9738                 DP(NETIF_MSG_TX_QUEUED,
9739                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9740                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9741                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9742         }
9743
9744         /* now at last mark the BD as the last BD */
9745         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9746
9747         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9748            tx_bd, tx_bd->bd_flags.as_bitfield);
9749
9750         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9751
9752         /* now send a tx doorbell, counting the next-page BD
9753          * if the packet chain contains or ends with it
9754          */
9755         if (TX_BD_POFF(bd_prod) < nbd)
9756                 nbd++;
9757
9758         if (pbd)
9759                 DP(NETIF_MSG_TX_QUEUED,
9760                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9761                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9762                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9763                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9764                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9765
9766         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9767
9768         /*
9769          * Make sure that the BD data is updated before updating the producer
9770          * since FW might read the BD right after the producer is updated.
9771          * This is only applicable for weak-ordered memory model archs such
9772          * as IA-64. The following barrier is also mandatory since the FW
9773          * assumes packets must have BDs.
9774          */
9775         wmb();
9776
9777         fp->hw_tx_prods->bds_prod =
9778                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9779         mb(); /* FW restriction: must not reorder writing nbd and packets */
9780         fp->hw_tx_prods->packets_prod =
9781                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9782         DOORBELL(bp, FP_IDX(fp), 0);
9783
9784         mmiowb();
9785
9786         fp->tx_bd_prod += nbd;
9787         dev->trans_start = jiffies;
9788
9789         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9790                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9791                    if we put Tx into XOFF state. */
9792                 smp_mb();
9793                 netif_stop_queue(dev);
9794                 bp->eth_stats.driver_xoff++;
9795                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9796                         netif_wake_queue(dev);
9797         }
9798         fp->tx_pkt++;
9799
9800         return NETDEV_TX_OK;
9801 }
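
/* BD accounting example (illustrative): a TSO skb with 2 frags whose linear
 * part is longer than its headers uses a start BD, a parsing BD, a split
 * data BD and 2 frag BDs: nbd starts at nr_frags + 2 = 4, bnx2x_tx_split()
 * bumps it to 5, and one more next-page BD is counted by the TX_BD_POFF()
 * check if the chain wraps past the end of a BD page.
 */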
9802
9803 /* called with rtnl_lock */
9804 static int bnx2x_open(struct net_device *dev)
9805 {
9806         struct bnx2x *bp = netdev_priv(dev);
9807
9808         bnx2x_set_power_state(bp, PCI_D0);
9809
9810         return bnx2x_nic_load(bp, LOAD_OPEN);
9811 }
9812
9813 /* called with rtnl_lock */
9814 static int bnx2x_close(struct net_device *dev)
9815 {
9816         struct bnx2x *bp = netdev_priv(dev);
9817
9818         /* Unload the driver, release IRQs */
9819         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9820         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9821                 if (!CHIP_REV_IS_SLOW(bp))
9822                         bnx2x_set_power_state(bp, PCI_D3hot);
9823
9824         return 0;
9825 }
9826
9827 /* called with netif_tx_lock from set_multicast */
9828 static void bnx2x_set_rx_mode(struct net_device *dev)
9829 {
9830         struct bnx2x *bp = netdev_priv(dev);
9831         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9832         int port = BP_PORT(bp);
9833
9834         if (bp->state != BNX2X_STATE_OPEN) {
9835                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9836                 return;
9837         }
9838
9839         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9840
9841         if (dev->flags & IFF_PROMISC)
9842                 rx_mode = BNX2X_RX_MODE_PROMISC;
9843
9844         else if ((dev->flags & IFF_ALLMULTI) ||
9845                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9846                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9847
9848         else { /* some multicasts */
9849                 if (CHIP_IS_E1(bp)) {
9850                         int i, old, offset;
9851                         struct dev_mc_list *mclist;
9852                         struct mac_configuration_cmd *config =
9853                                                 bnx2x_sp(bp, mcast_config);
9854
9855                         for (i = 0, mclist = dev->mc_list;
9856                              mclist && (i < dev->mc_count);
9857                              i++, mclist = mclist->next) {
9858
9859                                 config->config_table[i].
9860                                         cam_entry.msb_mac_addr =
9861                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9862                                 config->config_table[i].
9863                                         cam_entry.middle_mac_addr =
9864                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9865                                 config->config_table[i].
9866                                         cam_entry.lsb_mac_addr =
9867                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9868                                 config->config_table[i].cam_entry.flags =
9869                                                         cpu_to_le16(port);
9870                                 config->config_table[i].
9871                                         target_table_entry.flags = 0;
9872                                 config->config_table[i].
9873                                         target_table_entry.client_id = 0;
9874                                 config->config_table[i].
9875                                         target_table_entry.vlan_id = 0;
9876
9877                                 DP(NETIF_MSG_IFUP,
9878                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9879                                    config->config_table[i].
9880                                                 cam_entry.msb_mac_addr,
9881                                    config->config_table[i].
9882                                                 cam_entry.middle_mac_addr,
9883                                    config->config_table[i].
9884                                                 cam_entry.lsb_mac_addr);
9885                         }
9886                         old = config->hdr.length_6b;
9887                         if (old > i) {
9888                                 for (; i < old; i++) {
9889                                         if (CAM_IS_INVALID(config->
9890                                                            config_table[i])) {
9891                                                 /* already invalidated */
9892                                                 break;
9893                                         }
9894                                         /* invalidate */
9895                                         CAM_INVALIDATE(config->
9896                                                        config_table[i]);
9897                                 }
9898                         }
9899
9900                         if (CHIP_REV_IS_SLOW(bp))
9901                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9902                         else
9903                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9904
9905                         config->hdr.length_6b = i;
9906                         config->hdr.offset = offset;
9907                         config->hdr.client_id = BP_CL_ID(bp);
9908                         config->hdr.reserved1 = 0;
9909
9910                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9911                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9912                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9913                                       0);
9914                 } else { /* E1H */
9915                         /* Accept one or more multicasts */
9916                         struct dev_mc_list *mclist;
9917                         u32 mc_filter[MC_HASH_SIZE];
9918                         u32 crc, bit, regidx;
9919                         int i;
9920
9921                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9922
9923                         for (i = 0, mclist = dev->mc_list;
9924                              mclist && (i < dev->mc_count);
9925                              i++, mclist = mclist->next) {
9926
9927                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9928                                    mclist->dmi_addr);
9929
9930                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9931                                 bit = (crc >> 24) & 0xff;
9932                                 regidx = bit >> 5;
9933                                 bit &= 0x1f;
9934                                 mc_filter[regidx] |= (1 << bit);
9935                         }
9936
9937                         for (i = 0; i < MC_HASH_SIZE; i++)
9938                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9939                                        mc_filter[i]);
9940                 }
9941         }
9942
9943         bp->rx_mode = rx_mode;
9944         bnx2x_set_storm_rx_mode(bp);
9945 }
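
/* E1H multicast hash, sketched with an assumed MAC for illustration:
 *
 *	crc    = crc32c_le(0, mac, ETH_ALEN);	(mac = 01:00:5e:00:00:01)
 *	bit    = (crc >> 24) & 0xff;		(one of 256 filter bits)
 *	regidx = bit >> 5;			(one of MC_HASH_SIZE regs)
 *	mc_filter[regidx] |= 1 << (bit & 0x1f);
 *
 * A hash collision can only let extra multicast frames through; it can
 * never drop a group the host subscribed to.
 */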
9946
9947 /* called with rtnl_lock */
9948 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9949 {
9950         struct sockaddr *addr = p;
9951         struct bnx2x *bp = netdev_priv(dev);
9952
9953         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9954                 return -EINVAL;
9955
9956         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9957         if (netif_running(dev)) {
9958                 if (CHIP_IS_E1(bp))
9959                         bnx2x_set_mac_addr_e1(bp, 1);
9960                 else
9961                         bnx2x_set_mac_addr_e1h(bp, 1);
9962         }
9963
9964         return 0;
9965 }
9966
9967 /* called with rtnl_lock */
9968 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9969 {
9970         struct mii_ioctl_data *data = if_mii(ifr);
9971         struct bnx2x *bp = netdev_priv(dev);
9972         int port = BP_PORT(bp);
9973         int err;
9974
9975         switch (cmd) {
9976         case SIOCGMIIPHY:
9977                 data->phy_id = bp->port.phy_addr;
9978
9979                 /* fallthrough */
9980
9981         case SIOCGMIIREG: {
9982                 u16 mii_regval;
9983
9984                 if (!netif_running(dev))
9985                         return -EAGAIN;
9986
9987                 mutex_lock(&bp->port.phy_mutex);
9988                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9989                                       DEFAULT_PHY_DEV_ADDR,
9990                                       (data->reg_num & 0x1f), &mii_regval);
9991                 data->val_out = mii_regval;
9992                 mutex_unlock(&bp->port.phy_mutex);
9993                 return err;
9994         }
9995
9996         case SIOCSMIIREG:
9997                 if (!capable(CAP_NET_ADMIN))
9998                         return -EPERM;
9999
10000                 if (!netif_running(dev))
10001                         return -EAGAIN;
10002
10003                 mutex_lock(&bp->port.phy_mutex);
10004                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10005                                        DEFAULT_PHY_DEV_ADDR,
10006                                        (data->reg_num & 0x1f), data->val_in);
10007                 mutex_unlock(&bp->port.phy_mutex);
10008                 return err;
10009
10010         default:
10011                 /* do nothing */
10012                 break;
10013         }
10014
10015         return -EOPNOTSUPP;
10016 }
10017
10018 /* called with rtnl_lock */
10019 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10020 {
10021         struct bnx2x *bp = netdev_priv(dev);
10022         int rc = 0;
10023
10024         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10025             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10026                 return -EINVAL;
10027
10028         /* This does not race with packet allocation
10029          * because the actual alloc size is
10030          * only updated as part of load
10031          */
10032         dev->mtu = new_mtu;
10033
10034         if (netif_running(dev)) {
10035                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10036                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10037         }
10038
10039         return rc;
10040 }
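
/* Bounds example (assuming ETH_MIN_PACKET_SIZE = 60 and ETH_HLEN = 14):
 * the smallest MTU accepted is 46, since new_mtu + ETH_HLEN must reach the
 * 60-byte minimum frame size; the ceiling is ETH_MAX_JUMBO_PACKET_SIZE.
 */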
10041
10042 static void bnx2x_tx_timeout(struct net_device *dev)
10043 {
10044         struct bnx2x *bp = netdev_priv(dev);
10045
10046 #ifdef BNX2X_STOP_ON_ERROR
10047         if (!bp->panic)
10048                 bnx2x_panic();
10049 #endif
10050         /* This allows the netif to be shut down gracefully before resetting */
10051         schedule_work(&bp->reset_task);
10052 }
10053
10054 #ifdef BCM_VLAN
10055 /* called with rtnl_lock */
10056 static void bnx2x_vlan_rx_register(struct net_device *dev,
10057                                    struct vlan_group *vlgrp)
10058 {
10059         struct bnx2x *bp = netdev_priv(dev);
10060
10061         bp->vlgrp = vlgrp;
10062
10063         /* Set flags according to the required capabilities */
10064         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10065
10066         if (dev->features & NETIF_F_HW_VLAN_TX)
10067                 bp->flags |= HW_VLAN_TX_FLAG;
10068
10069         if (dev->features & NETIF_F_HW_VLAN_RX)
10070                 bp->flags |= HW_VLAN_RX_FLAG;
10071
10072         if (netif_running(dev))
10073                 bnx2x_set_client_config(bp);
10074 }
10075
10076 #endif
10077
10078 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10079 static void poll_bnx2x(struct net_device *dev)
10080 {
10081         struct bnx2x *bp = netdev_priv(dev);
10082
10083         disable_irq(bp->pdev->irq);
10084         bnx2x_interrupt(bp->pdev->irq, dev);
10085         enable_irq(bp->pdev->irq);
10086 }
10087 #endif
10088
10089 static const struct net_device_ops bnx2x_netdev_ops = {
10090         .ndo_open               = bnx2x_open,
10091         .ndo_stop               = bnx2x_close,
10092         .ndo_start_xmit         = bnx2x_start_xmit,
10093         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10094         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10095         .ndo_validate_addr      = eth_validate_addr,
10096         .ndo_do_ioctl           = bnx2x_ioctl,
10097         .ndo_change_mtu         = bnx2x_change_mtu,
10098         .ndo_tx_timeout         = bnx2x_tx_timeout,
10099 #ifdef BCM_VLAN
10100         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10101 #endif
10102 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10103         .ndo_poll_controller    = poll_bnx2x,
10104 #endif
10105 };
10106
10107
10108 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10109                                     struct net_device *dev)
10110 {
10111         struct bnx2x *bp;
10112         int rc;
10113
10114         SET_NETDEV_DEV(dev, &pdev->dev);
10115         bp = netdev_priv(dev);
10116
10117         bp->dev = dev;
10118         bp->pdev = pdev;
10119         bp->flags = 0;
10120         bp->func = PCI_FUNC(pdev->devfn);
10121
10122         rc = pci_enable_device(pdev);
10123         if (rc) {
10124                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10125                 goto err_out;
10126         }
10127
10128         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10129                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10130                        " aborting\n");
10131                 rc = -ENODEV;
10132                 goto err_out_disable;
10133         }
10134
10135         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10136                 printk(KERN_ERR PFX "Cannot find second PCI device"
10137                        " base address, aborting\n");
10138                 rc = -ENODEV;
10139                 goto err_out_disable;
10140         }
10141
10142         if (atomic_read(&pdev->enable_cnt) == 1) {
10143                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10144                 if (rc) {
10145                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10146                                " aborting\n");
10147                         goto err_out_disable;
10148                 }
10149
10150                 pci_set_master(pdev);
10151                 pci_save_state(pdev);
10152         }
10153
10154         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10155         if (bp->pm_cap == 0) {
10156                 printk(KERN_ERR PFX "Cannot find power management"
10157                        " capability, aborting\n");
10158                 rc = -EIO;
10159                 goto err_out_release;
10160         }
10161
10162         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10163         if (bp->pcie_cap == 0) {
10164                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10165                        " aborting\n");
10166                 rc = -EIO;
10167                 goto err_out_release;
10168         }
10169
10170         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10171                 bp->flags |= USING_DAC_FLAG;
10172                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10173                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10174                                " failed, aborting\n");
10175                         rc = -EIO;
10176                         goto err_out_release;
10177                 }
10178
10179         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10180                 printk(KERN_ERR PFX "System does not support DMA,"
10181                        " aborting\n");
10182                 rc = -EIO;
10183                 goto err_out_release;
10184         }
10185
10186         dev->mem_start = pci_resource_start(pdev, 0);
10187         dev->base_addr = dev->mem_start;
10188         dev->mem_end = pci_resource_end(pdev, 0);
10189
10190         dev->irq = pdev->irq;
10191
10192         bp->regview = pci_ioremap_bar(pdev, 0);
10193         if (!bp->regview) {
10194                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10195                 rc = -ENOMEM;
10196                 goto err_out_release;
10197         }
10198
10199         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10200                                         min_t(u64, BNX2X_DB_SIZE,
10201                                               pci_resource_len(pdev, 2)));
10202         if (!bp->doorbells) {
10203                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10204                 rc = -ENOMEM;
10205                 goto err_out_unmap;
10206         }
10207
10208         bnx2x_set_power_state(bp, PCI_D0);
10209
10210         /* clean indirect addresses */
10211         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10212                                PCICFG_VENDOR_ID_OFFSET);
10213         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10214         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10215         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10216         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10217
10218         dev->watchdog_timeo = TX_TIMEOUT;
10219
10220         dev->netdev_ops = &bnx2x_netdev_ops;
10221         dev->ethtool_ops = &bnx2x_ethtool_ops;
10222         dev->features |= NETIF_F_SG;
10223         dev->features |= NETIF_F_HW_CSUM;
10224         if (bp->flags & USING_DAC_FLAG)
10225                 dev->features |= NETIF_F_HIGHDMA;
10226 #ifdef BCM_VLAN
10227         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10228         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10229 #endif
10230         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10231         dev->features |= NETIF_F_TSO6;
10232
10233         return 0;
10234
10235 err_out_unmap:
10236         if (bp->regview) {
10237                 iounmap(bp->regview);
10238                 bp->regview = NULL;
10239         }
10240         if (bp->doorbells) {
10241                 iounmap(bp->doorbells);
10242                 bp->doorbells = NULL;
10243         }
10244
10245 err_out_release:
10246         if (atomic_read(&pdev->enable_cnt) == 1)
10247                 pci_release_regions(pdev);
10248
10249 err_out_disable:
10250         pci_disable_device(pdev);
10251         pci_set_drvdata(pdev, NULL);
10252
10253 err_out:
10254         return rc;
10255 }
10256
10257 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10258 {
10259         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10260
10261         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10262         return val;
10263 }
10264
10265 /* return value: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
10266 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10267 {
10268         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10269
10270         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10271         return val;
10272 }
10273
10274 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10275                                     const struct pci_device_id *ent)
10276 {
10277         static int version_printed;
10278         struct net_device *dev = NULL;
10279         struct bnx2x *bp;
10280         int rc;
10281
10282         if (version_printed++ == 0)
10283                 printk(KERN_INFO "%s", version);
10284
10285         /* dev is zeroed in alloc_etherdev() */
10286         dev = alloc_etherdev(sizeof(*bp));
10287         if (!dev) {
10288                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10289                 return -ENOMEM;
10290         }
10291
10292         bp = netdev_priv(dev);
10293         bp->msglevel = debug;
10294
10295         rc = bnx2x_init_dev(pdev, dev);
10296         if (rc < 0) {
10297                 free_netdev(dev);
10298                 return rc;
10299         }
10300
10301         pci_set_drvdata(pdev, dev);
10302
10303         rc = bnx2x_init_bp(bp);
10304         if (rc)
10305                 goto init_one_exit;
10306
10307         rc = register_netdev(dev);
10308         if (rc) {
10309                 dev_err(&pdev->dev, "Cannot register net device\n");
10310                 goto init_one_exit;
10311         }
10312
10313         netif_carrier_off(dev);
10314
10315         bp->common.name = board_info[ent->driver_data].name;
10316         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10317                " IRQ %d, ", dev->name, bp->common.name,
10318                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10319                bnx2x_get_pcie_width(bp),
10320                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10321                dev->base_addr, bp->pdev->irq);
10322         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10323         return 0;
10324
10325 init_one_exit:
10326         if (bp->regview)
10327                 iounmap(bp->regview);
10328
10329         if (bp->doorbells)
10330                 iounmap(bp->doorbells);
10331
10332         free_netdev(dev);
10333
10334         if (atomic_read(&pdev->enable_cnt) == 1)
10335                 pci_release_regions(pdev);
10336
10337         pci_disable_device(pdev);
10338         pci_set_drvdata(pdev, NULL);
10339
10340         return rc;
10341 }
10342
10343 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10344 {
10345         struct net_device *dev = pci_get_drvdata(pdev);
10346         struct bnx2x *bp;
10347
10348         if (!dev) {
10349                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10350                 return;
10351         }
10352         bp = netdev_priv(dev);
10353
10354         unregister_netdev(dev);
10355
10356         if (bp->regview)
10357                 iounmap(bp->regview);
10358
10359         if (bp->doorbells)
10360                 iounmap(bp->doorbells);
10361
10362         free_netdev(dev);
10363
10364         if (atomic_read(&pdev->enable_cnt) == 1)
10365                 pci_release_regions(pdev);
10366
10367         pci_disable_device(pdev);
10368         pci_set_drvdata(pdev, NULL);
10369 }
10370
10371 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10372 {
10373         struct net_device *dev = pci_get_drvdata(pdev);
10374         struct bnx2x *bp;
10375
10376         if (!dev) {
10377                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10378                 return -ENODEV;
10379         }
10380         bp = netdev_priv(dev);
10381
10382         rtnl_lock();
10383
10384         pci_save_state(pdev);
10385
10386         if (!netif_running(dev)) {
10387                 rtnl_unlock();
10388                 return 0;
10389         }
10390
10391         netif_device_detach(dev);
10392
10393         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10394
10395         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10396
10397         rtnl_unlock();
10398
10399         return 0;
10400 }
10401
10402 static int bnx2x_resume(struct pci_dev *pdev)
10403 {
10404         struct net_device *dev = pci_get_drvdata(pdev);
10405         struct bnx2x *bp;
10406         int rc;
10407
10408         if (!dev) {
10409                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10410                 return -ENODEV;
10411         }
10412         bp = netdev_priv(dev);
10413
10414         rtnl_lock();
10415
10416         pci_restore_state(pdev);
10417
10418         if (!netif_running(dev)) {
10419                 rtnl_unlock();
10420                 return 0;
10421         }
10422
10423         bnx2x_set_power_state(bp, PCI_D0);
10424         netif_device_attach(dev);
10425
10426         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10427
10428         rtnl_unlock();
10429
10430         return rc;
10431 }
10432
10433 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10434 {
10435         int i;
10436
10437         bp->state = BNX2X_STATE_ERROR;
10438
10439         bp->rx_mode = BNX2X_RX_MODE_NONE;
10440
10441         bnx2x_netif_stop(bp, 0);
10442
10443         del_timer_sync(&bp->timer);
10444         bp->stats_state = STATS_STATE_DISABLED;
10445         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10446
10447         /* Release IRQs */
10448         bnx2x_free_irq(bp);
10449
10450         if (CHIP_IS_E1(bp)) {
10451                 struct mac_configuration_cmd *config =
10452                                                 bnx2x_sp(bp, mcast_config);
10453
10454                 for (i = 0; i < config->hdr.length_6b; i++)
10455                         CAM_INVALIDATE(config->config_table[i]);
10456         }
10457
10458         /* Free SKBs, SGEs, TPA pool and driver internals */
10459         bnx2x_free_skbs(bp);
10460         for_each_queue(bp, i)
10461                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10462         bnx2x_free_mem(bp);
10463
10464         bp->state = BNX2X_STATE_CLOSED;
10465
10466         netif_carrier_off(bp->dev);
10467
10468         return 0;
10469 }
10470
10471 static void bnx2x_eeh_recover(struct bnx2x *bp)
10472 {
10473         u32 val;
10474
10475         mutex_init(&bp->port.phy_mutex);
10476
10477         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10478         bp->link_params.shmem_base = bp->common.shmem_base;
10479         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10480
10481         if (!bp->common.shmem_base ||
10482             (bp->common.shmem_base < 0xA0000) ||
10483             (bp->common.shmem_base >= 0xC0000)) {
10484                 BNX2X_DEV_INFO("MCP not active\n");
10485                 bp->flags |= NO_MCP_FLAG;
10486                 return;
10487         }
10488
10489         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10490         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10491                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10492                 BNX2X_ERR("BAD MCP validity signature\n");
10493
10494         if (!BP_NOMCP(bp)) {
10495                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10496                               & DRV_MSG_SEQ_NUMBER_MASK);
10497                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10498         }
10499 }
10500
10501 /**
10502  * bnx2x_io_error_detected - called when PCI error is detected
10503  * @pdev: Pointer to PCI device
10504  * @state: The current PCI connection state
10505  *
10506  * This function is called after a PCI bus error affecting
10507  * this device has been detected.
10508  */
10509 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10510                                                 pci_channel_state_t state)
10511 {
10512         struct net_device *dev = pci_get_drvdata(pdev);
10513         struct bnx2x *bp = netdev_priv(dev);
10514
10515         rtnl_lock();
10516
10517         netif_device_detach(dev);
10518
10519         if (netif_running(dev))
10520                 bnx2x_eeh_nic_unload(bp);
10521
10522         pci_disable_device(pdev);
10523
10524         rtnl_unlock();
10525
10526         /* Request a slot reset */
10527         return PCI_ERS_RESULT_NEED_RESET;
10528 }
10529
10530 /**
10531  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10532  * @pdev: Pointer to PCI device
10533  *
10534  * Restart the card from scratch, as if from a cold-boot.
10535  */
10536 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10537 {
10538         struct net_device *dev = pci_get_drvdata(pdev);
10539         struct bnx2x *bp = netdev_priv(dev);
10540
10541         rtnl_lock();
10542
10543         if (pci_enable_device(pdev)) {
10544                 dev_err(&pdev->dev,
10545                         "Cannot re-enable PCI device after reset\n");
10546                 rtnl_unlock();
10547                 return PCI_ERS_RESULT_DISCONNECT;
10548         }
10549
10550         pci_set_master(pdev);
10551         pci_restore_state(pdev);
10552
10553         if (netif_running(dev))
10554                 bnx2x_set_power_state(bp, PCI_D0);
10555
10556         rtnl_unlock();
10557
10558         return PCI_ERS_RESULT_RECOVERED;
10559 }
10560
10561 /**
10562  * bnx2x_io_resume - called when traffic can start flowing again
10563  * @pdev: Pointer to PCI device
10564  *
10565  * This callback is called when the error recovery driver tells us that
10566  * it's OK to resume normal operation.
10567  */
10568 static void bnx2x_io_resume(struct pci_dev *pdev)
10569 {
10570         struct net_device *dev = pci_get_drvdata(pdev);
10571         struct bnx2x *bp = netdev_priv(dev);
10572
10573         rtnl_lock();
10574
10575         bnx2x_eeh_recover(bp);
10576
10577         if (netif_running(dev))
10578                 bnx2x_nic_load(bp, LOAD_NORMAL);
10579
10580         netif_device_attach(dev);
10581
10582         rtnl_unlock();
10583 }
10584
10585 static struct pci_error_handlers bnx2x_err_handler = {
10586         .error_detected = bnx2x_io_error_detected,
10587         .slot_reset = bnx2x_io_slot_reset,
10588         .resume = bnx2x_io_resume,
10589 };
10590
10591 static struct pci_driver bnx2x_pci_driver = {
10592         .name        = DRV_MODULE_NAME,
10593         .id_table    = bnx2x_pci_tbl,
10594         .probe       = bnx2x_init_one,
10595         .remove      = __devexit_p(bnx2x_remove_one),
10596         .suspend     = bnx2x_suspend,
10597         .resume      = bnx2x_resume,
10598         .err_handler = &bnx2x_err_handler,
10599 };
10600
10601 static int __init bnx2x_init(void)
10602 {
10603         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10604         if (bnx2x_wq == NULL) {
10605                 printk(KERN_ERR PFX "Cannot create workqueue\n");
10606                 return -ENOMEM;
10607         }
10608
10609         return pci_register_driver(&bnx2x_pci_driver);
10610 }
10611
10612 static void __exit bnx2x_cleanup(void)
10613 {
10614         pci_unregister_driver(&bnx2x_pci_driver);
10615
10616         destroy_workqueue(bnx2x_wq);
10617 }
10618
10619 module_init(bnx2x_init);
10620 module_exit(bnx2x_cleanup);
10621