bnx2x: Remove redundant smp_mb() on unload
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.48.102"
#define DRV_MODULE_RELDATE      "2009/02/12"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

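/* read a register indirectly through the PCI config-space GRC window;
 * used only at init (same locking rules as bnx2x_reg_wr_ind above)
 */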
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

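/* GRC addresses of the "go" registers, one per DMAE command channel */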
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

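/* DMA len32 dwords from a host buffer at dma_addr to GRC address
 * dst_addr; falls back to indirect writes while DMAE is not ready and
 * polls the wb_comp word for completion under dmae_mutex
 */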
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

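/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer; falls back to indirect reads while DMAE is not ready
 */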
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

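/* walk the X/T/C/USTORM assert lists and print every valid entry;
 * returns the number of asserts found
 */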
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

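/* dump the MCP (bootcode) trace buffer from the scratchpad; "mark",
 * read at offset 0xf104, appears to be the current write position, so
 * the two loops print the older and newer halves of the ring in order
 */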
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_CONT "\n");
        printk(KERN_ERR PFX "end of fw dump\n");
}

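/* dump driver state on a fatal error: status block indices, per-queue
 * Rx/Tx pointers, a window of each ring, then the FW trace and any
 * storm asserts
 */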
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("queue[%d]: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

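/* program the HC for the interrupt mode actually in use (MSI-X, MSI or
 * INTx) and, on E1H, set up the leading/trailing edge attention bits
 */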
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

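/* mask interrupts (optionally at the HC as well) and wait until all
 * ISRs and the slowpath task are guaranteed to have finished
 */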
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

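/* acknowledge a status block to the IGU: report the last index the
 * driver has processed and (via op/update) whether to re-enable the
 * interrupt
 */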
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

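/* snapshot the CSTORM/USTORM indices from the fastpath status block;
 * returns a bitmask telling which of the two changed
 */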
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

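/* number of free Tx BDs; the NUM_TX_RINGS "next page" BDs never carry
 * data, so they are counted as used to keep a safety margin
 */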
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}

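/* slowpath completion on the fastpath ring: a ramrod has completed,
 * so return its spq credit and advance the fp/bp state machine
 */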
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

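/* start a TPA aggregation: the spare skb from the pool takes over the
 * ring slot at prod, while the skb at cons (holding the first part of
 * the aggregated packet) is parked in the pool until TPA stop
 */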
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

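/* attach the SGE pages listed in the CQE to the aggregated skb as page
 * frags, allocating a replacement page for each one consumed; on
 * failure the caller drops the whole packet
 */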
1251 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1252                                struct sk_buff *skb,
1253                                struct eth_fast_path_rx_cqe *fp_cqe,
1254                                u16 cqe_idx)
1255 {
1256         struct sw_rx_page *rx_pg, old_rx_pg;
1257         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1258         u32 i, frag_len, frag_size, pages;
1259         int err;
1260         int j;
1261
1262         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1263         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1264
1265         /* This is needed in order to enable forwarding support */
1266         if (frag_size)
1267                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1268                                                max(frag_size, (u32)len_on_bd));
1269
1270 #ifdef BNX2X_STOP_ON_ERROR
1271         if (pages >
1272             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1273                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1274                           pages, cqe_idx);
1275                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1276                           fp_cqe->pkt_len, len_on_bd);
1277                 bnx2x_panic();
1278                 return -EINVAL;
1279         }
1280 #endif
1281
1282         /* Run through the SGL and compose the fragmented skb */
1283         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1284                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1285
1286                 /* FW gives the indices of the SGE as if the ring is an array
1287                    (meaning that "next" element will consume 2 indices) */
1288                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1289                 rx_pg = &fp->rx_page_ring[sge_idx];
1290                 old_rx_pg = *rx_pg;
1291
1292                 /* If we fail to allocate a substitute page, we simply stop
1293                    where we are and drop the whole packet */
1294                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1295                 if (unlikely(err)) {
1296                         fp->eth_q_stats.rx_skb_alloc_failed++;
1297                         return err;
1298                 }
1299
1300                 /* Unmap the page as we r going to pass it to the stack */
1301                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1302                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1303
1304                 /* Add one frag and update the appropriate fields in the skb */
1305                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1306
1307                 skb->data_len += frag_len;
1308                 skb->truesize += frag_len;
1309                 skb->len += frag_len;
1310
1311                 frag_size -= frag_len;
1312         }
1313
1314         return 0;
1315 }
1316
1317 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1318                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1319                            u16 cqe_idx)
1320 {
1321         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1322         struct sk_buff *skb = rx_buf->skb;
1323         /* alloc new skb */
1324         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1325
1326         /* Unmap skb in the pool anyway, as we are going to change
1327            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1328            fails. */
1329         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1330                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1331
1332         if (likely(new_skb)) {
1333                 /* fix ip xsum and give it to the stack */
1334                 /* (no need to map the new skb) */
1335 #ifdef BCM_VLAN
1336                 int is_vlan_cqe =
1337                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1338                          PARSING_FLAGS_VLAN);
1339                 int is_not_hwaccel_vlan_cqe =
1340                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1341 #endif
1342
1343                 prefetch(skb);
1344                 prefetch(((char *)(skb)) + 128);
1345
1346 #ifdef BNX2X_STOP_ON_ERROR
1347                 if (pad + len > bp->rx_buf_size) {
1348                         BNX2X_ERR("skb_put is about to fail...  "
1349                                   "pad %d  len %d  rx_buf_size %d\n",
1350                                   pad, len, bp->rx_buf_size);
1351                         bnx2x_panic();
1352                         return;
1353                 }
1354 #endif
1355
1356                 skb_reserve(skb, pad);
1357                 skb_put(skb, len);
1358
1359                 skb->protocol = eth_type_trans(skb, bp->dev);
1360                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1361
1362                 {
1363                         struct iphdr *iph;
1364
1365                         iph = (struct iphdr *)skb->data;
1366 #ifdef BCM_VLAN
1367                         /* If there is no Rx VLAN offloading -
1368                            take VLAN tag into an account */
1369                         if (unlikely(is_not_hwaccel_vlan_cqe))
1370                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1371 #endif
1372                         iph->check = 0;
1373                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1374                 }
1375
1376                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1377                                          &cqe->fast_path_cqe, cqe_idx)) {
1378 #ifdef BCM_VLAN
1379                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1380                             (!is_not_hwaccel_vlan_cqe))
1381                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1382                                                 le16_to_cpu(cqe->fast_path_cqe.
1383                                                             vlan_tag));
1384                         else
1385 #endif
1386                                 netif_receive_skb(skb);
1387                 } else {
1388                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1389                            " - dropping packet!\n");
1390                         dev_kfree_skb(skb);
1391                 }
1392
1393
1394                 /* put new skb in bin */
1395                 fp->tpa_pool[queue].skb = new_skb;
1396
1397         } else {
1398                 /* else drop the packet and keep the buffer in the bin */
1399                 DP(NETIF_MSG_RX_STATUS,
1400                    "Failed to allocate new skb - dropping packet!\n");
1401                 fp->eth_q_stats.rx_skb_alloc_failed++;
1402         }
1403
1404         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1405 }
1406
1407 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1408                                         struct bnx2x_fastpath *fp,
1409                                         u16 bd_prod, u16 rx_comp_prod,
1410                                         u16 rx_sge_prod)
1411 {
1412         struct ustorm_eth_rx_producers rx_prods = {0};
1413         int i;
1414
1415         /* Update producers */
1416         rx_prods.bd_prod = bd_prod;
1417         rx_prods.cqe_prod = rx_comp_prod;
1418         rx_prods.sge_prod = rx_sge_prod;
1419
1420         /*
1421          * Make sure that the BD and SGE data is updated before updating the
1422          * producers since FW might read the BD/SGE right after the producer
1423          * is updated.
1424          * This is only applicable for weak-ordered memory model archs such
1425          * as IA-64. The following barrier is also mandatory since FW will
1426          * assumes BDs must have buffers.
1427          */
1428         wmb();
1429
1430         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1431                 REG_WR(bp, BAR_USTRORM_INTMEM +
1432                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1433                        ((u32 *)&rx_prods)[i]);
1434
1435         mmiowb(); /* keep prod updates ordered */
1436
1437         DP(NETIF_MSG_RX_STATUS,
1438            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1439            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1440 }
1441
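/*
 * bnx2x_rx_int() is the NAPI receive handler for one fastpath queue: it
 * walks the RX completion queue for up to @budget packets, dispatches
 * slowpath CQEs to bnx2x_sp_event(), drives the TPA (LRO) start/stop
 * state machine, applies the small-packet copy-break when a jumbo MTU is
 * configured, and finally publishes the new producer values to the FW.
 */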
1442 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1443 {
1444         struct bnx2x *bp = fp->bp;
1445         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1446         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1447         int rx_pkt = 0;
1448
1449 #ifdef BNX2X_STOP_ON_ERROR
1450         if (unlikely(bp->panic))
1451                 return 0;
1452 #endif
1453
1454         /* The CQ "next page" element is the same size as a regular
1455            element, so it is safe to simply skip over it here */
1456         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1457         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1458                 hw_comp_cons++;
1459
1460         bd_cons = fp->rx_bd_cons;
1461         bd_prod = fp->rx_bd_prod;
1462         bd_prod_fw = bd_prod;
1463         sw_comp_cons = fp->rx_comp_cons;
1464         sw_comp_prod = fp->rx_comp_prod;
1465
1466         /* Memory barrier necessary as speculative reads of the rx
1467          * buffer can be ahead of the index in the status block
1468          */
1469         rmb();
1470
1471         DP(NETIF_MSG_RX_STATUS,
1472            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1473            fp->index, hw_comp_cons, sw_comp_cons);
1474
1475         while (sw_comp_cons != hw_comp_cons) {
1476                 struct sw_rx_bd *rx_buf = NULL;
1477                 struct sk_buff *skb;
1478                 union eth_rx_cqe *cqe;
1479                 u8 cqe_fp_flags;
1480                 u16 len, pad;
1481
1482                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1483                 bd_prod = RX_BD(bd_prod);
1484                 bd_cons = RX_BD(bd_cons);
1485
1486                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1487                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1488
1489                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1490                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1491                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1492                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1493                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1494                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1495
1496                 /* is this a slowpath msg? */
1497                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1498                         bnx2x_sp_event(fp, cqe);
1499                         goto next_cqe;
1500
1501                 /* this is an rx packet */
1502                 } else {
1503                         rx_buf = &fp->rx_buf_ring[bd_cons];
1504                         skb = rx_buf->skb;
1505                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1506                         pad = cqe->fast_path_cqe.placement_offset;
1507
1508                         /* If CQE is marked both TPA_START and TPA_END
1509                            it is a non-TPA CQE */
1510                         if ((!fp->disable_tpa) &&
1511                             (TPA_TYPE(cqe_fp_flags) !=
1512                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1513                                 u16 queue = cqe->fast_path_cqe.queue_index;
1514
1515                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1516                                         DP(NETIF_MSG_RX_STATUS,
1517                                            "calling tpa_start on queue %d\n",
1518                                            queue);
1519
1520                                         bnx2x_tpa_start(fp, queue, skb,
1521                                                         bd_cons, bd_prod);
1522                                         goto next_rx;
1523                                 }
1524
1525                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1526                                         DP(NETIF_MSG_RX_STATUS,
1527                                            "calling tpa_stop on queue %d\n",
1528                                            queue);
1529
1530                                         if (!BNX2X_RX_SUM_FIX(cqe))
1531                                                 BNX2X_ERR("STOP on non-TCP "
1532                                                           "data\n");
1533
1534                                         /* This is the size of the linear data
1535                                            on this skb */
1536                                         len = le16_to_cpu(cqe->fast_path_cqe.
1537                                                                 len_on_bd);
1538                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1539                                                     len, cqe, comp_ring_cons);
1540 #ifdef BNX2X_STOP_ON_ERROR
1541                                         if (bp->panic)
1542                                                 return -EINVAL;
1543 #endif
1544
1545                                         bnx2x_update_sge_prod(fp,
1546                                                         &cqe->fast_path_cqe);
1547                                         goto next_cqe;
1548                                 }
1549                         }
1550
1551                         pci_dma_sync_single_for_device(bp->pdev,
1552                                         pci_unmap_addr(rx_buf, mapping),
1553                                                        pad + RX_COPY_THRESH,
1554                                                        PCI_DMA_FROMDEVICE);
1555                         prefetch(skb);
1556                         prefetch(((char *)(skb)) + 128);
1557
1558                         /* is this an error packet? */
1559                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1560                                 DP(NETIF_MSG_RX_ERR,
1561                                    "ERROR  flags %x  rx packet %u\n",
1562                                    cqe_fp_flags, sw_comp_cons);
1563                                 fp->eth_q_stats.rx_err_discard_pkt++;
1564                                 goto reuse_rx;
1565                         }
1566
1567                         /* Since we don't have a jumbo ring,
1568                          * copy small packets if mtu > 1500
1569                          */
1570                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1571                             (len <= RX_COPY_THRESH)) {
1572                                 struct sk_buff *new_skb;
1573
1574                                 new_skb = netdev_alloc_skb(bp->dev,
1575                                                            len + pad);
1576                                 if (new_skb == NULL) {
1577                                         DP(NETIF_MSG_RX_ERR,
1578                                            "ERROR  packet dropped "
1579                                            "because of alloc failure\n");
1580                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1581                                         goto reuse_rx;
1582                                 }
1583
1584                                 /* aligned copy */
1585                                 skb_copy_from_linear_data_offset(skb, pad,
1586                                                     new_skb->data + pad, len);
1587                                 skb_reserve(new_skb, pad);
1588                                 skb_put(new_skb, len);
1589
1590                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1591
1592                                 skb = new_skb;
1593
1594                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1595                                 pci_unmap_single(bp->pdev,
1596                                         pci_unmap_addr(rx_buf, mapping),
1597                                                  bp->rx_buf_size,
1598                                                  PCI_DMA_FROMDEVICE);
1599                                 skb_reserve(skb, pad);
1600                                 skb_put(skb, len);
1601
1602                         } else {
1603                                 DP(NETIF_MSG_RX_ERR,
1604                                    "ERROR  packet dropped because "
1605                                    "of alloc failure\n");
1606                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1607 reuse_rx:
1608                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1609                                 goto next_rx;
1610                         }
1611
1612                         skb->protocol = eth_type_trans(skb, bp->dev);
1613
1614                         skb->ip_summed = CHECKSUM_NONE;
1615                         if (bp->rx_csum) {
1616                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1617                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1618                                 else
1619                                         fp->eth_q_stats.hw_csum_err++;
1620                         }
1621                 }
1622
1623                 skb_record_rx_queue(skb, fp->index);
1624 #ifdef BCM_VLAN
1625                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1626                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1627                      PARSING_FLAGS_VLAN))
1628                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1629                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1630                 else
1631 #endif
1632                         netif_receive_skb(skb);
1633
1634
1635 next_rx:
1636                 rx_buf->skb = NULL;
1637
1638                 bd_cons = NEXT_RX_IDX(bd_cons);
1639                 bd_prod = NEXT_RX_IDX(bd_prod);
1640                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1641                 rx_pkt++;
1642 next_cqe:
1643                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1644                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1645
1646                 if (rx_pkt == budget)
1647                         break;
1648         } /* while */
1649
1650         fp->rx_bd_cons = bd_cons;
1651         fp->rx_bd_prod = bd_prod_fw;
1652         fp->rx_comp_cons = sw_comp_cons;
1653         fp->rx_comp_prod = sw_comp_prod;
1654
1655         /* Update producers */
1656         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1657                              fp->rx_sge_prod);
1658
1659         fp->rx_pkt += rx_pkt;
1660         fp->rx_calls++;
1661
1662         return rx_pkt;
1663 }
1664
1665 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1666 {
1667         struct bnx2x_fastpath *fp = fp_cookie;
1668         struct bnx2x *bp = fp->bp;
1669         int index = fp->index;
1670
1671         /* Return here if interrupt is disabled */
1672         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1673                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1674                 return IRQ_HANDLED;
1675         }
1676
1677         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1678            index, fp->sb_id);
1679         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1680
1681 #ifdef BNX2X_STOP_ON_ERROR
1682         if (unlikely(bp->panic))
1683                 return IRQ_HANDLED;
1684 #endif
1685
1686         prefetch(fp->rx_cons_sb);
1687         prefetch(fp->tx_cons_sb);
1688         prefetch(&fp->status_blk->c_status_block.status_block_index);
1689         prefetch(&fp->status_blk->u_status_block.status_block_index);
1690
1691         napi_schedule(&bnx2x_fp(bp, index, napi));
1692
1693         return IRQ_HANDLED;
1694 }
1695
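/*
 * Single ISR used in INT#x/MSI mode (MSI-X uses bnx2x_msix_fp_int() above):
 * bit (0x2 << sb_id) of the acked status selects fastpath queue 0, while
 * bit 0 signals a slowpath event that is deferred to the sp_task work item.
 */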
1696 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1697 {
1698         struct bnx2x *bp = netdev_priv(dev_instance);
1699         u16 status = bnx2x_ack_int(bp);
1700         u16 mask;
1701
1702         /* Return here if interrupt is shared and it's not for us */
1703         if (unlikely(status == 0)) {
1704                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1705                 return IRQ_NONE;
1706         }
1707         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1708
1709         /* Return here if interrupt is disabled */
1710         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1712                 return IRQ_HANDLED;
1713         }
1714
1715 #ifdef BNX2X_STOP_ON_ERROR
1716         if (unlikely(bp->panic))
1717                 return IRQ_HANDLED;
1718 #endif
1719
1720         mask = 0x2 << bp->fp[0].sb_id;
1721         if (status & mask) {
1722                 struct bnx2x_fastpath *fp = &bp->fp[0];
1723
1724                 prefetch(fp->rx_cons_sb);
1725                 prefetch(fp->tx_cons_sb);
1726                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1727                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1728
1729                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1730
1731                 status &= ~mask;
1732         }
1733
1734
1735         if (unlikely(status & 0x1)) {
1736                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1737
1738                 status &= ~0x1;
1739                 if (!status)
1740                         return IRQ_HANDLED;
1741         }
1742
1743         if (status)
1744                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1745                    status);
1746
1747         return IRQ_HANDLED;
1748 }
1749
1750 /* end of fast path */
1751
1752 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1753
1754 /* Link */
1755
1756 /*
1757  * General service functions
1758  */
1759
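/*
 * HW resource lock: writing the resource bit at hw_lock_control_reg + 4
 * attempts to take the lock (the acquire below polls for up to 5 seconds
 * in 5ms steps), while writing the same bit at hw_lock_control_reg
 * releases it.  Typical usage (sketch):
 *
 *	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	if (!rc) {
 *		... touch the shared resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 */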
1760 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1761 {
1762         u32 lock_status;
1763         u32 resource_bit = (1 << resource);
1764         int func = BP_FUNC(bp);
1765         u32 hw_lock_control_reg;
1766         int cnt;
1767
1768         /* Validating that the resource is within range */
1769         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1770                 DP(NETIF_MSG_HW,
1771                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1772                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1773                 return -EINVAL;
1774         }
1775
1776         if (func <= 5) {
1777                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1778         } else {
1779                 hw_lock_control_reg =
1780                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1781         }
1782
1783         /* Validating that the resource is not already taken */
1784         lock_status = REG_RD(bp, hw_lock_control_reg);
1785         if (lock_status & resource_bit) {
1786                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1787                    lock_status, resource_bit);
1788                 return -EEXIST;
1789         }
1790
1791         /* Try for 5 seconds, polling every 5ms */
1792         for (cnt = 0; cnt < 1000; cnt++) {
1793                 /* Try to acquire the lock */
1794                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1795                 lock_status = REG_RD(bp, hw_lock_control_reg);
1796                 if (lock_status & resource_bit)
1797                         return 0;
1798
1799                 msleep(5);
1800         }
1801         DP(NETIF_MSG_HW, "Timeout\n");
1802         return -EAGAIN;
1803 }
1804
1805 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1806 {
1807         u32 lock_status;
1808         u32 resource_bit = (1 << resource);
1809         int func = BP_FUNC(bp);
1810         u32 hw_lock_control_reg;
1811
1812         /* Validating that the resource is within range */
1813         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1814                 DP(NETIF_MSG_HW,
1815                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1816                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1817                 return -EINVAL;
1818         }
1819
1820         if (func <= 5) {
1821                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1822         } else {
1823                 hw_lock_control_reg =
1824                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1825         }
1826
1827         /* Validating that the resource is currently taken */
1828         lock_status = REG_RD(bp, hw_lock_control_reg);
1829         if (!(lock_status & resource_bit)) {
1830                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1831                    lock_status, resource_bit);
1832                 return -EFAULT;
1833         }
1834
1835         REG_WR(bp, hw_lock_control_reg, resource_bit);
1836         return 0;
1837 }
1838
1839 /* HW Lock for shared dual port PHYs */
1840 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1841 {
1842         mutex_lock(&bp->port.phy_mutex);
1843
1844         if (bp->port.need_hw_lock)
1845                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1846 }
1847
1848 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1849 {
1850         if (bp->port.need_hw_lock)
1851                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1852
1853         mutex_unlock(&bp->port.phy_mutex);
1854 }
1855
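/*
 * GPIO accessors: when the port-swap strap is set and overridden, the pin
 * block of the other port is used, so the effective shift is gpio_num plus
 * MISC_REGISTERS_GPIO_PORT_SHIFT for the swapped port.  Writes are done
 * under the GPIO HW lock so both ports can modify the shared register
 * safely; reads need no lock.
 */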
1856 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1857 {
1858         /* The GPIO should be swapped if the swap register is set and active */
1859         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1860                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1861         int gpio_shift = gpio_num +
1862                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1863         u32 gpio_mask = (1 << gpio_shift);
1864         u32 gpio_reg;
1865         int value;
1866
1867         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1868                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1869                 return -EINVAL;
1870         }
1871
1872         /* read GPIO value */
1873         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1874
1875         /* get the requested pin value */
1876         if ((gpio_reg & gpio_mask) == gpio_mask)
1877                 value = 1;
1878         else
1879                 value = 0;
1880
1881         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1882
1883         return value;
1884 }
1885
1886 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1887 {
1888         /* The GPIO should be swapped if the swap register is set and active */
1889         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1890                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1891         int gpio_shift = gpio_num +
1892                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1893         u32 gpio_mask = (1 << gpio_shift);
1894         u32 gpio_reg;
1895
1896         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1897                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1898                 return -EINVAL;
1899         }
1900
1901         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1902         /* read GPIO and mask off all bits except the float bits */
1903         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1904
1905         switch (mode) {
1906         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1907                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1908                    gpio_num, gpio_shift);
1909                 /* clear FLOAT and set CLR */
1910                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1911                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1912                 break;
1913
1914         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1915                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1916                    gpio_num, gpio_shift);
1917                 /* clear FLOAT and set SET */
1918                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1920                 break;
1921
1922         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1923                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1924                    gpio_num, gpio_shift);
1925                 /* set FLOAT */
1926                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1927                 break;
1928
1929         default:
1930                 break;
1931         }
1932
1933         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1934         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1935
1936         return 0;
1937 }
1938
1939 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1940 {
1941         /* The GPIO should be swapped if the swap register is set and active */
1942         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1943                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1944         int gpio_shift = gpio_num +
1945                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1946         u32 gpio_mask = (1 << gpio_shift);
1947         u32 gpio_reg;
1948
1949         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1950                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1951                 return -EINVAL;
1952         }
1953
1954         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1955         /* read GPIO int */
1956         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1957
1958         switch (mode) {
1959         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1960                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1961                                    "output low\n", gpio_num, gpio_shift);
1962                 /* clear SET and set CLR */
1963                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1964                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1965                 break;
1966
1967         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1968                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1969                                    "output high\n", gpio_num, gpio_shift);
1970                 /* clear CLR and set SET */
1971                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1973                 break;
1974
1975         default:
1976                 break;
1977         }
1978
1979         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1980         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981
1982         return 0;
1983 }
1984
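/*
 * SPIO pins 4-7 follow the same float/set/clr scheme as the GPIOs above;
 * note that bnx2x_set_spio() takes no port argument, which suggests the
 * setting applies chip-wide rather than per port.
 */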
1985 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1986 {
1987         u32 spio_mask = (1 << spio_num);
1988         u32 spio_reg;
1989
1990         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1991             (spio_num > MISC_REGISTERS_SPIO_7)) {
1992                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1993                 return -EINVAL;
1994         }
1995
1996         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1997         /* read SPIO and mask off all bits except the float bits */
1998         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1999
2000         switch (mode) {
2001         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2002                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2003                 /* clear FLOAT and set CLR */
2004                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2005                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2006                 break;
2007
2008         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2009                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2010                 /* clear FLOAT and set SET */
2011                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2013                 break;
2014
2015         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2016                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2017                 /* set FLOAT */
2018                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2019                 break;
2020
2021         default:
2022                 break;
2023         }
2024
2025         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2026         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2027
2028         return 0;
2029 }
2030
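/*
 * Translate the negotiated IEEE pause bits in link_vars.ieee_fc into the
 * ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause flags reported through
 * bp->port.advertising.
 */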
2031 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2032 {
2033         switch (bp->link_vars.ieee_fc &
2034                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2035         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2036                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2037                                           ADVERTISED_Pause);
2038                 break;
2039
2040         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2041                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2042                                          ADVERTISED_Pause);
2043                 break;
2044
2045         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2046                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2047                 break;
2048
2049         default:
2050                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2051                                           ADVERTISED_Pause);
2052                 break;
2053         }
2054 }
2055
2056 static void bnx2x_link_report(struct bnx2x *bp)
2057 {
2058         if (bp->link_vars.link_up) {
2059                 if (bp->state == BNX2X_STATE_OPEN)
2060                         netif_carrier_on(bp->dev);
2061                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2062
2063                 printk("%d Mbps ", bp->link_vars.line_speed);
2064
2065                 if (bp->link_vars.duplex == DUPLEX_FULL)
2066                         printk("full duplex");
2067                 else
2068                         printk("half duplex");
2069
2070                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2071                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2072                                 printk(", receive ");
2073                                 if (bp->link_vars.flow_ctrl &
2074                                     BNX2X_FLOW_CTRL_TX)
2075                                         printk("& transmit ");
2076                         } else {
2077                                 printk(", transmit ");
2078                         }
2079                         printk("flow control ON");
2080                 }
2081                 printk("\n");
2082
2083         } else { /* link_down */
2084                 netif_carrier_off(bp->dev);
2085                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2086         }
2087 }
2088
2089 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2090 {
2091         if (!BP_NOMCP(bp)) {
2092                 u8 rc;
2093
2094                 /* Initialize link parameters structure variables */
2095                 /* It is recommended to turn off RX FC for jumbo frames
2096                    for better performance */
2097                 if (IS_E1HMF(bp))
2098                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2099                 else if (bp->dev->mtu > 5000)
2100                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2101                 else
2102                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2103
2104                 bnx2x_acquire_phy_lock(bp);
2105
2106                 if (load_mode == LOAD_DIAG)
2107                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2108
2109                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2110
2111                 bnx2x_release_phy_lock(bp);
2112
2113                 bnx2x_calc_fc_adv(bp);
2114
2115                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2116                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2117                         bnx2x_link_report(bp);
2118                 }
2119
2120                 return rc;
2121         }
2122         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2123         return -EINVAL;
2124 }
2125
2126 static void bnx2x_link_set(struct bnx2x *bp)
2127 {
2128         if (!BP_NOMCP(bp)) {
2129                 bnx2x_acquire_phy_lock(bp);
2130                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131                 bnx2x_release_phy_lock(bp);
2132
2133                 bnx2x_calc_fc_adv(bp);
2134         } else
2135                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2136 }
2137
2138 static void bnx2x__link_reset(struct bnx2x *bp)
2139 {
2140         if (!BP_NOMCP(bp)) {
2141                 bnx2x_acquire_phy_lock(bp);
2142                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2143                 bnx2x_release_phy_lock(bp);
2144         } else
2145                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2146 }
2147
2148 static u8 bnx2x_link_test(struct bnx2x *bp)
2149 {
2150         u8 rc;
2151
2152         bnx2x_acquire_phy_lock(bp);
2153         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2154         bnx2x_release_phy_lock(bp);
2155
2156         return rc;
2157 }
2158
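/*
 * Per-port rate shaping and fairness setup.  All timeouts are converted
 * from usec to SDM ticks (4 usec each); r_param is the line rate divided
 * by 8, i.e. bytes/usec assuming line_speed is given in Mbps, and the
 * thresholds below are derived from it.
 */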
2159 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2160 {
2161         u32 r_param = bp->link_vars.line_speed / 8;
2162         u32 fair_periodic_timeout_usec;
2163         u32 t_fair;
2164
2165         memset(&(bp->cmng.rs_vars), 0,
2166                sizeof(struct rate_shaping_vars_per_port));
2167         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2168
2169         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2170         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2171
2172         /* this is the threshold below which no timer arming will occur.
2173            The 1.25 coefficient makes the threshold a little bigger than
2174            the real time, to compensate for timer inaccuracy */
2175         bp->cmng.rs_vars.rs_threshold =
2176                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2177
2178         /* resolution of fairness timer */
2179         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2180         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2181         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2182
2183         /* this is the threshold below which we won't arm the timer anymore */
2184         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2185
2186         /* we multiply by 1e3/8 to get bytes/msec.
2187            We don't want the credits to exceed
2188            t_fair*FAIR_MEM (the algorithm resolution) */
2189         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2190         /* since each tick is 4 usec */
2191         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2192 }
2193
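/*
 * Per-VN (virtual NIC) min/max rate setup for E1H multi-function mode.
 * The min/max bandwidth fields read from the MF configuration are
 * multiplied by 100 below, i.e. the shmem fields appear to be kept in
 * units of 100 Mbps.
 */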
2194 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2195 {
2196         struct rate_shaping_vars_per_vn m_rs_vn;
2197         struct fairness_vars_per_vn m_fair_vn;
2198         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2199         u16 vn_min_rate, vn_max_rate;
2200         int i;
2201
2202         /* If the function is hidden - set min and max to zero */
2203         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2204                 vn_min_rate = 0;
2205                 vn_max_rate = 0;
2206
2207         } else {
2208                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2209                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2210                 /* If fairness is enabled (not all min rates are zero) and
2211                    the current min rate is zero - set it to 1.
2212                    This is a requirement of the algorithm. */
2213                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2214                         vn_min_rate = DEF_MIN_RATE;
2215                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2216                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2217         }
2218
2219         DP(NETIF_MSG_IFUP,
2220            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2221            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2222
2223         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2224         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2225
2226         /* global vn counter - maximal Mbps for this vn */
2227         m_rs_vn.vn_counter.rate = vn_max_rate;
2228
2229         /* quota - number of bytes transmitted in this period */
2230         m_rs_vn.vn_counter.quota =
2231                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2232
2233         if (bp->vn_weight_sum) {
2234                 /* credit for each period of the fairness algorithm:
2235                    number of bytes in T_FAIR (the VNs share the port rate).
2236                    vn_weight_sum should not be larger than 10000, thus
2237                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2238                    than zero */
2239                 m_fair_vn.vn_credit_delta =
2240                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2241                                                  (8 * bp->vn_weight_sum))),
2242                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2243                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2244                    m_fair_vn.vn_credit_delta);
2245         }
2246
2247         /* Store it to internal memory */
2248         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2249                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2250                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2251                        ((u32 *)(&m_rs_vn))[i]);
2252
2253         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2254                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2255                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2256                        ((u32 *)(&m_fair_vn))[i]);
2257 }
2258
2259
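/*
 * On link-up in E1H multi-function mode the handler below also re-inits
 * the port min/max and per-VN fairness contexts and signals the other
 * functions on the same port through the LINK_SYNC general attentions.
 */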
2260 /* This function is called upon link interrupt */
2261 static void bnx2x_link_attn(struct bnx2x *bp)
2262 {
2263         /* Make sure that we are synced with the current statistics */
2264         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2265
2266         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2267
2268         if (bp->link_vars.link_up) {
2269
2270                 /* dropless flow control */
2271                 if (CHIP_IS_E1H(bp)) {
2272                         int port = BP_PORT(bp);
2273                         u32 pause_enabled = 0;
2274
2275                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2276                                 pause_enabled = 1;
2277
2278                         REG_WR(bp, BAR_USTRORM_INTMEM +
2279                                USTORM_PAUSE_ENABLED_OFFSET(port),
2280                                pause_enabled);
2281                 }
2282
2283                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2284                         struct host_port_stats *pstats;
2285
2286                         pstats = bnx2x_sp(bp, port_stats);
2287                         /* reset old bmac stats */
2288                         memset(&(pstats->mac_stx[0]), 0,
2289                                sizeof(struct mac_stx));
2290                 }
2291                 if ((bp->state == BNX2X_STATE_OPEN) ||
2292                     (bp->state == BNX2X_STATE_DISABLED))
2293                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2294         }
2295
2296         /* indicate link status */
2297         bnx2x_link_report(bp);
2298
2299         if (IS_E1HMF(bp)) {
2300                 int port = BP_PORT(bp);
2301                 int func;
2302                 int vn;
2303
2304                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2305                         if (vn == BP_E1HVN(bp))
2306                                 continue;
2307
2308                         func = ((vn << 1) | port);
2309
2310                         /* Set the attention towards other drivers
2311                            on the same port */
2312                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2313                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2314                 }
2315
2316                 if (bp->link_vars.link_up) {
2317                         int i;
2318
2319                         /* Init rate shaping and fairness contexts */
2320                         bnx2x_init_port_minmax(bp);
2321
2322                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2323                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2324
2325                         /* Store it to internal memory */
2326                         for (i = 0;
2327                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2328                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2329                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2330                                        ((u32 *)(&bp->cmng))[i]);
2331                 }
2332         }
2333 }
2334
2335 static void bnx2x__link_status_update(struct bnx2x *bp)
2336 {
2337         if (bp->state != BNX2X_STATE_OPEN)
2338                 return;
2339
2340         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2341
2342         if (bp->link_vars.link_up)
2343                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2344         else
2345                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2346
2347         /* indicate link status */
2348         bnx2x_link_report(bp);
2349 }
2350
2351 static void bnx2x_pmf_update(struct bnx2x *bp)
2352 {
2353         int port = BP_PORT(bp);
2354         u32 val;
2355
2356         bp->port.pmf = 1;
2357         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2358
2359         /* enable nig attention */
2360         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2361         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2362         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2363
2364         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2365 }
2366
2367 /* end of Link */
2368
2369 /* slow path */
2370
2371 /*
2372  * General service functions
2373  */
2374
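/*
 * Slowpath queue (SPQ) posting: each ramrod takes one BD, spq_left is the
 * ring credit, and the new producer index is made visible to the chip via
 * the XSTORM_SPQ_PROD_OFFSET doorbell, all under spq_lock.
 */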
2375 /* the slow path queue is odd since completions arrive on the fastpath ring */
2376 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2377                          u32 data_hi, u32 data_lo, int common)
2378 {
2379         int func = BP_FUNC(bp);
2380
2381         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2382            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2383            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2384            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2385            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2386
2387 #ifdef BNX2X_STOP_ON_ERROR
2388         if (unlikely(bp->panic))
2389                 return -EIO;
2390 #endif
2391
2392         spin_lock_bh(&bp->spq_lock);
2393
2394         if (!bp->spq_left) {
2395                 BNX2X_ERR("BUG! SPQ ring full!\n");
2396                 spin_unlock_bh(&bp->spq_lock);
2397                 bnx2x_panic();
2398                 return -EBUSY;
2399         }
2400
2401         /* CID needs the port number to be encoded in it */
2402         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2403                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2404                                      HW_CID(bp, cid)));
2405         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2406         if (common)
2407                 bp->spq_prod_bd->hdr.type |=
2408                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2409
2410         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2411         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2412
2413         bp->spq_left--;
2414
2415         if (bp->spq_prod_bd == bp->spq_last_bd) {
2416                 bp->spq_prod_bd = bp->spq;
2417                 bp->spq_prod_idx = 0;
2418                 DP(NETIF_MSG_TIMER, "end of spq\n");
2419
2420         } else {
2421                 bp->spq_prod_bd++;
2422                 bp->spq_prod_idx++;
2423         }
2424
2425         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2426                bp->spq_prod_idx);
2427
2428         spin_unlock_bh(&bp->spq_lock);
2429         return 0;
2430 }
2431
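/*
 * The ALR is bit 31 of the register at GRCBASE_MCP + 0x9c: writing the
 * bit requests the lock, and the lock is owned once the bit reads back as
 * set; clearing the register releases it (see bnx2x_release_alr() below).
 */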
2432 /* acquire split MCP access lock register */
2433 static int bnx2x_acquire_alr(struct bnx2x *bp)
2434 {
2435         u32 i, j, val;
2436         int rc = 0;
2437
2438         might_sleep();
2439         i = 100;
2440         for (j = 0; j < i*10; j++) {
2441                 val = (1UL << 31);
2442                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2443                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2444                 if (val & (1L << 31))
2445                         break;
2446
2447                 msleep(5);
2448         }
2449         if (!(val & (1L << 31))) {
2450                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2451                 rc = -EBUSY;
2452         }
2453
2454         return rc;
2455 }
2456
2457 /* release split MCP access lock register */
2458 static void bnx2x_release_alr(struct bnx2x *bp)
2459 {
2460         u32 val = 0;
2461
2462         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2463 }
2464
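/*
 * Returns a bitmask of the default status block indices that changed
 * since the last call: bit 0 - attention bits, bit 1 - CSTORM,
 * bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.
 */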
2465 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2466 {
2467         struct host_def_status_block *def_sb = bp->def_status_blk;
2468         u16 rc = 0;
2469
2470         barrier(); /* status block is written to by the chip */
2471         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2472                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2473                 rc |= 1;
2474         }
2475         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2476                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2477                 rc |= 2;
2478         }
2479         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2480                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2481                 rc |= 4;
2482         }
2483         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2484                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2485                 rc |= 8;
2486         }
2487         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2488                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2489                 rc |= 16;
2490         }
2491         return rc;
2492 }
2493
2494 /*
2495  * slow path service functions
2496  */
2497
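/*
 * Attention assert handling: newly asserted bits are masked in the AEU
 * (under the per-port attention-mask HW lock), recorded in attn_state and
 * acknowledged towards the HC.  A NIG attention additionally runs the
 * link handling with the NIG interrupt temporarily masked.
 */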
2498 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2499 {
2500         int port = BP_PORT(bp);
2501         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2502                        COMMAND_REG_ATTN_BITS_SET);
2503         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2504                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2505         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2506                                        NIG_REG_MASK_INTERRUPT_PORT0;
2507         u32 aeu_mask;
2508         u32 nig_mask = 0;
2509
2510         if (bp->attn_state & asserted)
2511                 BNX2X_ERR("IGU ERROR\n");
2512
2513         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2514         aeu_mask = REG_RD(bp, aeu_addr);
2515
2516         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2517            aeu_mask, asserted);
2518         aeu_mask &= ~(asserted & 0xff);
2519         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2520
2521         REG_WR(bp, aeu_addr, aeu_mask);
2522         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2523
2524         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2525         bp->attn_state |= asserted;
2526         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2527
2528         if (asserted & ATTN_HARD_WIRED_MASK) {
2529                 if (asserted & ATTN_NIG_FOR_FUNC) {
2530
2531                         bnx2x_acquire_phy_lock(bp);
2532
2533                         /* save nig interrupt mask */
2534                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2535                         REG_WR(bp, nig_int_mask_addr, 0);
2536
2537                         bnx2x_link_attn(bp);
2538
2539                         /* handle unicore attn? */
2540                 }
2541                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2542                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2543
2544                 if (asserted & GPIO_2_FUNC)
2545                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2546
2547                 if (asserted & GPIO_3_FUNC)
2548                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2549
2550                 if (asserted & GPIO_4_FUNC)
2551                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2552
2553                 if (port == 0) {
2554                         if (asserted & ATTN_GENERAL_ATTN_1) {
2555                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2556                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2557                         }
2558                         if (asserted & ATTN_GENERAL_ATTN_2) {
2559                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2560                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2561                         }
2562                         if (asserted & ATTN_GENERAL_ATTN_3) {
2563                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2564                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2565                         }
2566                 } else {
2567                         if (asserted & ATTN_GENERAL_ATTN_4) {
2568                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2569                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2570                         }
2571                         if (asserted & ATTN_GENERAL_ATTN_5) {
2572                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2573                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2574                         }
2575                         if (asserted & ATTN_GENERAL_ATTN_6) {
2576                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2577                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2578                         }
2579                 }
2580
2581         } /* if hardwired */
2582
2583         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2584            asserted, hc_addr);
2585         REG_WR(bp, hc_addr, asserted);
2586
2587         /* now set back the mask */
2588         if (asserted & ATTN_NIG_FOR_FUNC) {
2589                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2590                 bnx2x_release_phy_lock(bp);
2591         }
2592 }
2593
2594 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2595 {
2596         int port = BP_PORT(bp);
2597         int reg_offset;
2598         u32 val;
2599
2600         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2601                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2602
2603         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2604
2605                 val = REG_RD(bp, reg_offset);
2606                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2607                 REG_WR(bp, reg_offset, val);
2608
2609                 BNX2X_ERR("SPIO5 hw attention\n");
2610
2611                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2612                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2613                         /* Fan failure attention */
2614
2615                         /* The PHY reset is controlled by GPIO 1 */
2616                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2617                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2618                         /* Low power mode is controlled by GPIO 2 */
2619                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2620                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2621                         /* mark the failure */
2622                         bp->link_params.ext_phy_config &=
2623                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2624                         bp->link_params.ext_phy_config |=
2625                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2626                         SHMEM_WR(bp,
2627                                  dev_info.port_hw_config[port].
2628                                                         external_phy_config,
2629                                  bp->link_params.ext_phy_config);
2630                         /* log the failure */
2631                         printk(KERN_ERR PFX "Fan Failure on Network"
2632                                " Controller %s has caused the driver to"
2633                                " shutdown the card to prevent permanent"
2634                                " damage.  Please contact Dell Support for"
2635                                " assistance\n", bp->dev->name);
2636                         break;
2637
2638                 default:
2639                         break;
2640                 }
2641         }
2642
2643         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2644                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2645                 bnx2x_acquire_phy_lock(bp);
2646                 bnx2x_handle_module_detect_int(&bp->link_params);
2647                 bnx2x_release_phy_lock(bp);
2648         }
2649
2650         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2651
2652                 val = REG_RD(bp, reg_offset);
2653                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2654                 REG_WR(bp, reg_offset, val);
2655
2656                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2657                           (attn & HW_INTERRUT_ASSERT_SET_0));
2658                 bnx2x_panic();
2659         }
2660 }
2661
2662 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2663 {
2664         u32 val;
2665
2666         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2667
2668                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2669                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2670                 /* DORQ discard attention */
2671                 if (val & 0x2)
2672                         BNX2X_ERR("FATAL error from DORQ\n");
2673         }
2674
2675         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2676
2677                 int port = BP_PORT(bp);
2678                 int reg_offset;
2679
2680                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2681                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2682
2683                 val = REG_RD(bp, reg_offset);
2684                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2685                 REG_WR(bp, reg_offset, val);
2686
2687                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2688                           (attn & HW_INTERRUT_ASSERT_SET_1));
2689                 bnx2x_panic();
2690         }
2691 }
2692
2693 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2694 {
2695         u32 val;
2696
2697         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2698
2699                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2700                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2701                 /* CFC error attention */
2702                 if (val & 0x2)
2703                         BNX2X_ERR("FATAL error from CFC\n");
2704         }
2705
2706         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2707
2708                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2709                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2710                 /* RQ_USDMDP_FIFO_OVERFLOW */
2711                 if (val & 0x18000)
2712                         BNX2X_ERR("FATAL error from PXP\n");
2713         }
2714
2715         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2716
2717                 int port = BP_PORT(bp);
2718                 int reg_offset;
2719
2720                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2721                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2722
2723                 val = REG_RD(bp, reg_offset);
2724                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2725                 REG_WR(bp, reg_offset, val);
2726
2727                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2728                           (attn & HW_INTERRUT_ASSERT_SET_2));
2729                 bnx2x_panic();
2730         }
2731 }
2732
2733 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2734 {
2735         u32 val;
2736
2737         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2738
2739                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2740                         int func = BP_FUNC(bp);
2741
2742                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2743                         bnx2x__link_status_update(bp);
2744                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2745                                                         DRV_STATUS_PMF)
2746                                 bnx2x_pmf_update(bp);
2747
2748                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2749
2750                         BNX2X_ERR("MC assert!\n");
2751                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2752                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2753                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2754                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2755                         bnx2x_panic();
2756
2757                 } else if (attn & BNX2X_MCP_ASSERT) {
2758
2759                         BNX2X_ERR("MCP assert!\n");
2760                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2761                         bnx2x_fw_dump(bp);
2762
2763                 } else
2764                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2765         }
2766
2767         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2768                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2769                 if (attn & BNX2X_GRC_TIMEOUT) {
2770                         val = CHIP_IS_E1H(bp) ?
2771                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2772                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2773                 }
2774                 if (attn & BNX2X_GRC_RSV) {
2775                         val = CHIP_IS_E1H(bp) ?
2776                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2777                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2778                 }
2779                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2780         }
2781 }
2782
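/*
 * Attention deassert handling: the after-invert registers are read under
 * the split MCP lock (ALR) since the MCP or the other port may handle the
 * same event, each deasserted group is dispatched to the per-register
 * handlers above, and finally the HC is acked and the AEU mask and
 * attn_state are restored.
 */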
2783 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2784 {
2785         struct attn_route attn;
2786         struct attn_route group_mask;
2787         int port = BP_PORT(bp);
2788         int index;
2789         u32 reg_addr;
2790         u32 val;
2791         u32 aeu_mask;
2792
2793         /* need to take the HW lock because the MCP or the other port
2794            might also try to handle this event */
2795         bnx2x_acquire_alr(bp);
2796
2797         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2798         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2799         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2800         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2801         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2802            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2803
2804         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2805                 if (deasserted & (1 << index)) {
2806                         group_mask = bp->attn_group[index];
2807
2808                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2809                            index, group_mask.sig[0], group_mask.sig[1],
2810                            group_mask.sig[2], group_mask.sig[3]);
2811
2812                         bnx2x_attn_int_deasserted3(bp,
2813                                         attn.sig[3] & group_mask.sig[3]);
2814                         bnx2x_attn_int_deasserted1(bp,
2815                                         attn.sig[1] & group_mask.sig[1]);
2816                         bnx2x_attn_int_deasserted2(bp,
2817                                         attn.sig[2] & group_mask.sig[2]);
2818                         bnx2x_attn_int_deasserted0(bp,
2819                                         attn.sig[0] & group_mask.sig[0]);
2820
2821                         if ((attn.sig[0] & group_mask.sig[0] &
2822                                                 HW_PRTY_ASSERT_SET_0) ||
2823                             (attn.sig[1] & group_mask.sig[1] &
2824                                                 HW_PRTY_ASSERT_SET_1) ||
2825                             (attn.sig[2] & group_mask.sig[2] &
2826                                                 HW_PRTY_ASSERT_SET_2))
2827                                 BNX2X_ERR("FATAL HW block parity attention\n");
2828                 }
2829         }
2830
2831         bnx2x_release_alr(bp);
2832
2833         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2834
2835         val = ~deasserted;
2836         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2837            val, reg_addr);
2838         REG_WR(bp, reg_addr, val);
2839
2840         if (~bp->attn_state & deasserted)
2841                 BNX2X_ERR("IGU ERROR\n");
2842
2843         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2844                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2845
2846         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2847         aeu_mask = REG_RD(bp, reg_addr);
2848
2849         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2850            aeu_mask, deasserted);
2851         aeu_mask |= (deasserted & 0xff);
2852         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2853
2854         REG_WR(bp, reg_addr, aeu_mask);
2855         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2856
2857         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2858         bp->attn_state &= ~deasserted;
2859         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2860 }
2861
2862 static void bnx2x_attn_int(struct bnx2x *bp)
2863 {
2864         /* read local copy of bits */
2865         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2866                                                                 attn_bits);
2867         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2868                                                                 attn_bits_ack);
2869         u32 attn_state = bp->attn_state;
2870
2871         /* look for changed bits */
2872         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2873         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2874
2875         DP(NETIF_MSG_HW,
2876            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2877            attn_bits, attn_ack, asserted, deasserted);
2878
2879         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2880                 BNX2X_ERR("BAD attention state\n");
2881
2882         /* handle bits that were raised */
2883         if (asserted)
2884                 bnx2x_attn_int_asserted(bp, asserted);
2885
2886         if (deasserted)
2887                 bnx2x_attn_int_deasserted(bp, deasserted);
2888 }
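
/* Worked example (illustrative only, values made up): suppose line 0
 * was asserted earlier (already acked and tracked) and is still up,
 * while line 2 has just fired:
 *
 *	attn_bits  = 0x5   (lines 0 and 2 raised in the status block)
 *	attn_ack   = 0x1   (line 0 already acknowledged)
 *	attn_state = 0x1   (line 0 tracked as active by the driver)
 *
 *	asserted   =  attn_bits & ~attn_ack & ~attn_state = 0x4
 *	deasserted = ~attn_bits &  attn_ack &  attn_state = 0x0
 *
 * so only line 2 is reported as newly asserted.  The "BAD attention
 * state" check fires when the hardware's raised and acked views agree
 * for a bit but the driver's tracked state disagrees with them.
 */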
2889
2890 static void bnx2x_sp_task(struct work_struct *work)
2891 {
2892         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2893         u16 status;
2894
2895
2896         /* Return here if interrupt is disabled */
2897         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2898                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2899                 return;
2900         }
2901
2902         status = bnx2x_update_dsb_idx(bp);
2903 /*      if (status == 0)                                     */
2904 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2905
2906         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2907
2908         /* HW attentions */
2909         if (status & 0x1)
2910                 bnx2x_attn_int(bp);
2911
2912         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2913                      IGU_INT_NOP, 1);
2914         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2915                      IGU_INT_NOP, 1);
2916         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2917                      IGU_INT_NOP, 1);
2918         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2919                      IGU_INT_NOP, 1);
2920         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2921                      IGU_INT_ENABLE, 1);
2922
2923 }
2924
2925 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2926 {
2927         struct net_device *dev = dev_instance;
2928         struct bnx2x *bp = netdev_priv(dev);
2929
2930         /* Return here if interrupt is disabled */
2931         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2932                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2933                 return IRQ_HANDLED;
2934         }
2935
2936         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2937
2938 #ifdef BNX2X_STOP_ON_ERROR
2939         if (unlikely(bp->panic))
2940                 return IRQ_HANDLED;
2941 #endif
2942
2943         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2944
2945         return IRQ_HANDLED;
2946 }
2947
2948 /* end of slow path */
2949
2950 /* Statistics */
2951
2952 /****************************************************************************
2953 * Macros
2954 ****************************************************************************/
2955
2956 /* sum[hi:lo] += add[hi:lo] */
2957 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2958         do { \
2959                 s_lo += a_lo; \
2960                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2961         } while (0)
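
/* Sketch of the carry test above (example values, not driver code):
 * when the 32-bit add wraps, the low word ends up smaller than the
 * addend, which is exactly the unsigned-overflow condition checked.
 */
static inline void bnx2x_add_64_example(void)
{
	u32 s_hi = 0x00000001, s_lo = 0xffffffff;

	ADD_64(s_hi, 0, s_lo, 1);
	/* now s_hi == 0x2 and s_lo == 0x0: 0x1ffffffff + 1 */
}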
2962
2963 /* difference = minuend - subtrahend */
2964 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2965         do { \
2966                 if (m_lo < s_lo) { \
2967                         /* underflow */ \
2968                         d_hi = m_hi - s_hi; \
2969                         if (d_hi > 0) { \
2970                                 /* we can 'loan' 1 */ \
2971                                 d_hi--; \
2972                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2973                         } else { \
2974                                 /* m_hi <= s_hi */ \
2975                                 d_hi = 0; \
2976                                 d_lo = 0; \
2977                         } \
2978                 } else { \
2979                         /* m_lo >= s_lo */ \
2980                         if (m_hi < s_hi) { \
2981                                 d_hi = 0; \
2982                                 d_lo = 0; \
2983                         } else { \
2984                                 /* m_hi >= s_hi */ \
2985                                 d_hi = m_hi - s_hi; \
2986                                 d_lo = m_lo - s_lo; \
2987                         } \
2988                 } \
2989         } while (0)
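
/* Worked example for DIFF_64 (illustrative values): minuend
 * 0x00000002_00000000 minus subtrahend 0x00000001_00000001.
 * Here m_lo (0) < s_lo (1) and d_hi = m_hi - s_hi = 1 > 0, so one
 * unit is borrowed from the high word:
 *
 *	d_hi = 2 - 1 - 1 = 0
 *	d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff
 *
 * giving 0x00000000_ffffffff, the expected difference.  If the
 * subtrahend were the larger value, the macro would clamp the result
 * to zero instead of letting the counter go negative.
 */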
2990
2991 #define UPDATE_STAT64(s, t) \
2992         do { \
2993                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2994                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2995                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2996                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2997                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2998                        pstats->mac_stx[1].t##_lo, diff.lo); \
2999         } while (0)
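
/* Reading UPDATE_STAT64 (descriptive note): mac_stx[0] caches the last
 * raw snapshot of each MAC counter so the next update can take a
 * delta, while mac_stx[1] accumulates those deltas into the running
 * 64-bit totals consumed by the rest of the statistics code.
 */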
3000
3001 #define UPDATE_STAT64_NIG(s, t) \
3002         do { \
3003                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3004                         diff.lo, new->s##_lo, old->s##_lo); \
3005                 ADD_64(estats->t##_hi, diff.hi, \
3006                        estats->t##_lo, diff.lo); \
3007         } while (0)
3008
3009 /* sum[hi:lo] += add */
3010 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3011         do { \
3012                 s_lo += a; \
3013                 s_hi += (s_lo < a) ? 1 : 0; \
3014         } while (0)
3015
3016 #define UPDATE_EXTEND_STAT(s) \
3017         do { \
3018                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3019                               pstats->mac_stx[1].s##_lo, \
3020                               new->s); \
3021         } while (0)
3022
3023 #define UPDATE_EXTEND_TSTAT(s, t) \
3024         do { \
3025                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3026                 old_tclient->s = tclient->s; \
3027                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3028         } while (0)
3029
3030 #define UPDATE_EXTEND_USTAT(s, t) \
3031         do { \
3032                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3033                 old_uclient->s = uclient->s; \
3034                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3035         } while (0)
3036
3037 #define UPDATE_EXTEND_XSTAT(s, t) \
3038         do { \
3039                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3040                 old_xclient->s = xclient->s; \
3041                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3042         } while (0)
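
/* Note on the UPDATE_EXTEND_*STAT macros above: the delta is computed
 * in 32-bit unsigned arithmetic, so it stays correct across a single
 * wrap of the firmware counter.  Example (made-up values):
 *
 *	old = 0xfffffff0, new = 0x00000010
 *	diff = new - old = 0x00000020	(32 packets, no bogus huge value)
 */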
3043
3044 /* minuend -= subtrahend */
3045 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3046         do { \
3047                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3048         } while (0)
3049
3050 /* minuend[hi:lo] -= subtrahend */
3051 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3052         do { \
3053                 SUB_64(m_hi, 0, m_lo, s); \
3054         } while (0)
3055
3056 #define SUB_EXTEND_USTAT(s, t) \
3057         do { \
3058                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3059                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3060         } while (0)
3061
3062 /*
3063  * General service functions
3064  */
3065
3066 static inline long bnx2x_hilo(u32 *hiref)
3067 {
3068         u32 lo = *(hiref + 1);
3069 #if (BITS_PER_LONG == 64)
3070         u32 hi = *hiref;
3071
3072         return HILO_U64(hi, lo);
3073 #else
3074         return lo;
3075 #endif
3076 }
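
/* Sketch of the folding above, assuming HILO_U64() is the usual
 * "((u64)hi << 32) | lo" combine: 64-bit kernels return the full
 * counter, while on 32-bit kernels only the low word fits into the
 * unsigned long fields of struct net_device_stats, so the high word
 * is dropped on purpose.
 */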
3077
3078 /*
3079  * Init service functions
3080  */
3081
3082 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3083 {
3084         if (!bp->stats_pending) {
3085                 struct eth_query_ramrod_data ramrod_data = {0};
3086                 int i, rc;
3087
3088                 ramrod_data.drv_counter = bp->stats_counter++;
3089                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3090                 for_each_queue(bp, i)
3091                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3092
3093                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3094                                    ((u32 *)&ramrod_data)[1],
3095                                    ((u32 *)&ramrod_data)[0], 0);
3096                 if (rc == 0) {
3097                 /* stats ramrod has its own slot on the spq */
3098                         bp->spq_left++;
3099                         bp->stats_pending = 1;
3100                 }
3101         }
3102 }
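
/* Illustrative note on the post above: the ramrod data is handed to
 * bnx2x_sp_post() as two raw u32 words, ((u32 *)&ramrod_data)[1] as
 * the high word and [0] as the low word, which assumes that
 * struct eth_query_ramrod_data packs into exactly 64 bits.
 */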
3103
3104 static void bnx2x_stats_init(struct bnx2x *bp)
3105 {
3106         int port = BP_PORT(bp);
3107         int i;
3108
3109         bp->stats_pending = 0;
3110         bp->executer_idx = 0;
3111         bp->stats_counter = 0;
3112
3113         /* port stats */
3114         if (!BP_NOMCP(bp))
3115                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3116         else
3117                 bp->port.port_stx = 0;
3118         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3119
3120         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3121         bp->port.old_nig_stats.brb_discard =
3122                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3123         bp->port.old_nig_stats.brb_truncate =
3124                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3125         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3126                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3127         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3128                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3129
3130         /* function stats */
3131         for_each_queue(bp, i) {
3132                 struct bnx2x_fastpath *fp = &bp->fp[i];
3133
3134                 memset(&fp->old_tclient, 0,
3135                        sizeof(struct tstorm_per_client_stats));
3136                 memset(&fp->old_uclient, 0,
3137                        sizeof(struct ustorm_per_client_stats));
3138                 memset(&fp->old_xclient, 0,
3139                        sizeof(struct xstorm_per_client_stats));
3140                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3141         }
3142
3143         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3144         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3145
3146         bp->stats_state = STATS_STATE_DISABLED;
3147         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3148                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3149 }
3150
3151 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3152 {
3153         struct dmae_command *dmae = &bp->stats_dmae;
3154         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3155
3156         *stats_comp = DMAE_COMP_VAL;
3157         if (CHIP_REV_IS_SLOW(bp))
3158                 return;
3159
3160         /* loader */
3161         if (bp->executer_idx) {
3162                 int loader_idx = PMF_DMAE_C(bp);
3163
3164                 memset(dmae, 0, sizeof(struct dmae_command));
3165
3166                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3167                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3168                                 DMAE_CMD_DST_RESET |
3169 #ifdef __BIG_ENDIAN
3170                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3171 #else
3172                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3173 #endif
3174                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3175                                                DMAE_CMD_PORT_0) |
3176                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3177                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3178                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3179                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3180                                      sizeof(struct dmae_command) *
3181                                      (loader_idx + 1)) >> 2;
3182                 dmae->dst_addr_hi = 0;
3183                 dmae->len = sizeof(struct dmae_command) >> 2;
3184                 if (CHIP_IS_E1(bp))
3185                         dmae->len--;
3186                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3187                 dmae->comp_addr_hi = 0;
3188                 dmae->comp_val = 1;
3189
3190                 *stats_comp = 0;
3191                 bnx2x_post_dmae(bp, dmae, loader_idx);
3192
3193         } else if (bp->func_stx) {
3194                 *stats_comp = 0;
3195                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3196         }
3197 }
3198
3199 static int bnx2x_stats_comp(struct bnx2x *bp)
3200 {
3201         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3202         int cnt = 10;
3203
3204         might_sleep();
3205         while (*stats_comp != DMAE_COMP_VAL) {
3206                 if (!cnt) {
3207                         BNX2X_ERR("timeout waiting for stats to finish\n");
3208                         break;
3209                 }
3210                 cnt--;
3211                 msleep(1);
3212         }
3213         return 1;
3214 }
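
/* Minimal sketch of the completion-polling idiom used above
 * (hypothetical helper, not part of the driver): spin on a DMA-written
 * completion word with a bounded number of 1 ms sleeps.
 */
static inline int bnx2x_poll_comp_example(volatile u32 *comp, u32 done,
					  int tries)
{
	while (*comp != done) {
		if (!tries--)
			return -ETIMEDOUT;	/* caller decides how to recover */
		msleep(1);
	}
	return 0;
}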
3215
3216 /*
3217  * Statistics service functions
3218  */
3219
3220 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3221 {
3222         struct dmae_command *dmae;
3223         u32 opcode;
3224         int loader_idx = PMF_DMAE_C(bp);
3225         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3226
3227         /* sanity */
3228         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3229                 BNX2X_ERR("BUG!\n");
3230                 return;
3231         }
3232
3233         bp->executer_idx = 0;
3234
3235         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3236                   DMAE_CMD_C_ENABLE |
3237                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3238 #ifdef __BIG_ENDIAN
3239                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3240 #else
3241                   DMAE_CMD_ENDIANITY_DW_SWAP |
3242 #endif
3243                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3244                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3245
3246         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3247         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3248         dmae->src_addr_lo = bp->port.port_stx >> 2;
3249         dmae->src_addr_hi = 0;
3250         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3251         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3252         dmae->len = DMAE_LEN32_RD_MAX;
3253         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3254         dmae->comp_addr_hi = 0;
3255         dmae->comp_val = 1;
3256
3257         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3258         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3259         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3260         dmae->src_addr_hi = 0;
3261         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3262                                    DMAE_LEN32_RD_MAX * 4);
3263         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3264                                    DMAE_LEN32_RD_MAX * 4);
3265         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3266         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3267         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3268         dmae->comp_val = DMAE_COMP_VAL;
3269
3270         *stats_comp = 0;
3271         bnx2x_hw_stats_post(bp);
3272         bnx2x_stats_comp(bp);
3273 }
3274
3275 static void bnx2x_port_stats_init(struct bnx2x *bp)
3276 {
3277         struct dmae_command *dmae;
3278         int port = BP_PORT(bp);
3279         int vn = BP_E1HVN(bp);
3280         u32 opcode;
3281         int loader_idx = PMF_DMAE_C(bp);
3282         u32 mac_addr;
3283         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3284
3285         /* sanity */
3286         if (!bp->link_vars.link_up || !bp->port.pmf) {
3287                 BNX2X_ERR("BUG!\n");
3288                 return;
3289         }
3290
3291         bp->executer_idx = 0;
3292
3293         /* MCP */
3294         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3295                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3296                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3297 #ifdef __BIG_ENDIAN
3298                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3299 #else
3300                   DMAE_CMD_ENDIANITY_DW_SWAP |
3301 #endif
3302                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3303                   (vn << DMAE_CMD_E1HVN_SHIFT));
3304
3305         if (bp->port.port_stx) {
3306
3307                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3308                 dmae->opcode = opcode;
3309                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3310                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3311                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3312                 dmae->dst_addr_hi = 0;
3313                 dmae->len = sizeof(struct host_port_stats) >> 2;
3314                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315                 dmae->comp_addr_hi = 0;
3316                 dmae->comp_val = 1;
3317         }
3318
3319         if (bp->func_stx) {
3320
3321                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322                 dmae->opcode = opcode;
3323                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3324                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3325                 dmae->dst_addr_lo = bp->func_stx >> 2;
3326                 dmae->dst_addr_hi = 0;
3327                 dmae->len = sizeof(struct host_func_stats) >> 2;
3328                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329                 dmae->comp_addr_hi = 0;
3330                 dmae->comp_val = 1;
3331         }
3332
3333         /* MAC */
3334         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3335                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3336                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3337 #ifdef __BIG_ENDIAN
3338                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3339 #else
3340                   DMAE_CMD_ENDIANITY_DW_SWAP |
3341 #endif
3342                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3343                   (vn << DMAE_CMD_E1HVN_SHIFT));
3344
3345         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3346
3347                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3348                                    NIG_REG_INGRESS_BMAC0_MEM);
3349
3350                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3351                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3352                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353                 dmae->opcode = opcode;
3354                 dmae->src_addr_lo = (mac_addr +
3355                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3356                 dmae->src_addr_hi = 0;
3357                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3358                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3359                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3360                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3361                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362                 dmae->comp_addr_hi = 0;
3363                 dmae->comp_val = 1;
3364
3365                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3366                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3367                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368                 dmae->opcode = opcode;
3369                 dmae->src_addr_lo = (mac_addr +
3370                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3371                 dmae->src_addr_hi = 0;
3372                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3374                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3376                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3377                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3379                 dmae->comp_addr_hi = 0;
3380                 dmae->comp_val = 1;
3381
3382         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3383
3384                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3385
3386                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
3387                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388                 dmae->opcode = opcode;
3389                 dmae->src_addr_lo = (mac_addr +
3390                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3391                 dmae->src_addr_hi = 0;
3392                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3393                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3394                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3395                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396                 dmae->comp_addr_hi = 0;
3397                 dmae->comp_val = 1;
3398
3399                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3400                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3401                 dmae->opcode = opcode;
3402                 dmae->src_addr_lo = (mac_addr +
3403                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3404                 dmae->src_addr_hi = 0;
3405                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3406                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3407                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3408                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3409                 dmae->len = 1;
3410                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3411                 dmae->comp_addr_hi = 0;
3412                 dmae->comp_val = 1;
3413
3414                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
3415                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3416                 dmae->opcode = opcode;
3417                 dmae->src_addr_lo = (mac_addr +
3418                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3419                 dmae->src_addr_hi = 0;
3420                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3421                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3422                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3423                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3424                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3425                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3426                 dmae->comp_addr_hi = 0;
3427                 dmae->comp_val = 1;
3428         }
3429
3430         /* NIG */
3431         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3432         dmae->opcode = opcode;
3433         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3434                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3435         dmae->src_addr_hi = 0;
3436         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3437         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3438         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3439         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3440         dmae->comp_addr_hi = 0;
3441         dmae->comp_val = 1;
3442
3443         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3444         dmae->opcode = opcode;
3445         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3446                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3447         dmae->src_addr_hi = 0;
3448         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3449                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3450         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3451                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3452         dmae->len = (2*sizeof(u32)) >> 2;
3453         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3454         dmae->comp_addr_hi = 0;
3455         dmae->comp_val = 1;
3456
3457         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3458         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3459                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3460                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3461 #ifdef __BIG_ENDIAN
3462                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3463 #else
3464                         DMAE_CMD_ENDIANITY_DW_SWAP |
3465 #endif
3466                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3467                         (vn << DMAE_CMD_E1HVN_SHIFT));
3468         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3469                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3470         dmae->src_addr_hi = 0;
3471         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3472                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3473         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3474                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3475         dmae->len = (2*sizeof(u32)) >> 2;
3476         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3477         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3478         dmae->comp_val = DMAE_COMP_VAL;
3479
3480         *stats_comp = 0;
3481 }
3482
3483 static void bnx2x_func_stats_init(struct bnx2x *bp)
3484 {
3485         struct dmae_command *dmae = &bp->stats_dmae;
3486         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3487
3488         /* sanity */
3489         if (!bp->func_stx) {
3490                 BNX2X_ERR("BUG!\n");
3491                 return;
3492         }
3493
3494         bp->executer_idx = 0;
3495         memset(dmae, 0, sizeof(struct dmae_command));
3496
3497         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3498                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3499                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3500 #ifdef __BIG_ENDIAN
3501                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3502 #else
3503                         DMAE_CMD_ENDIANITY_DW_SWAP |
3504 #endif
3505                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3506                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3507         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3508         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3509         dmae->dst_addr_lo = bp->func_stx >> 2;
3510         dmae->dst_addr_hi = 0;
3511         dmae->len = sizeof(struct host_func_stats) >> 2;
3512         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3513         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3514         dmae->comp_val = DMAE_COMP_VAL;
3515
3516         *stats_comp = 0;
3517 }
3518
3519 static void bnx2x_stats_start(struct bnx2x *bp)
3520 {
3521         if (bp->port.pmf)
3522                 bnx2x_port_stats_init(bp);
3523
3524         else if (bp->func_stx)
3525                 bnx2x_func_stats_init(bp);
3526
3527         bnx2x_hw_stats_post(bp);
3528         bnx2x_storm_stats_post(bp);
3529 }
3530
3531 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3532 {
3533         bnx2x_stats_comp(bp);
3534         bnx2x_stats_pmf_update(bp);
3535         bnx2x_stats_start(bp);
3536 }
3537
3538 static void bnx2x_stats_restart(struct bnx2x *bp)
3539 {
3540         bnx2x_stats_comp(bp);
3541         bnx2x_stats_start(bp);
3542 }
3543
3544 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3545 {
3546         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3547         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3548         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3549         struct {
3550                 u32 lo;
3551                 u32 hi;
3552         } diff;
3553
3554         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3555         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3556         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3557         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3558         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3559         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3560         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3561         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3562         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3563         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3564         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3565         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3566         UPDATE_STAT64(tx_stat_gt127,
3567                                 tx_stat_etherstatspkts65octetsto127octets);
3568         UPDATE_STAT64(tx_stat_gt255,
3569                                 tx_stat_etherstatspkts128octetsto255octets);
3570         UPDATE_STAT64(tx_stat_gt511,
3571                                 tx_stat_etherstatspkts256octetsto511octets);
3572         UPDATE_STAT64(tx_stat_gt1023,
3573                                 tx_stat_etherstatspkts512octetsto1023octets);
3574         UPDATE_STAT64(tx_stat_gt1518,
3575                                 tx_stat_etherstatspkts1024octetsto1522octets);
3576         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3577         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3578         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3579         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3580         UPDATE_STAT64(tx_stat_gterr,
3581                                 tx_stat_dot3statsinternalmactransmiterrors);
3582         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3583
3584         estats->pause_frames_received_hi =
3585                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3586         estats->pause_frames_received_lo =
3587                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3588
3589         estats->pause_frames_sent_hi =
3590                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3591         estats->pause_frames_sent_lo =
3592                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3593 }
3594
3595 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3596 {
3597         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3598         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3599         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3600
3601         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3602         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3603         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3604         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3605         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3606         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3607         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3608         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3609         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3610         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3611         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3612         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3613         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3614         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3615         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3616         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3617         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3618         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3619         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3620         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3621         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3622         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3623         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3624         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3625         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3626         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3627         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3628         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3629         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3630         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3631         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3632
3633         estats->pause_frames_received_hi =
3634                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3635         estats->pause_frames_received_lo =
3636                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3637         ADD_64(estats->pause_frames_received_hi,
3638                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3639                estats->pause_frames_received_lo,
3640                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3641
3642         estats->pause_frames_sent_hi =
3643                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3644         estats->pause_frames_sent_lo =
3645                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3646         ADD_64(estats->pause_frames_sent_hi,
3647                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3648                estats->pause_frames_sent_lo,
3649                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3650 }
3651
3652 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3653 {
3654         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3655         struct nig_stats *old = &(bp->port.old_nig_stats);
3656         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3657         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3658         struct {
3659                 u32 lo;
3660                 u32 hi;
3661         } diff;
3662         u32 nig_timer_max;
3663
3664         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3665                 bnx2x_bmac_stats_update(bp);
3666
3667         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3668                 bnx2x_emac_stats_update(bp);
3669
3670         else { /* unreached */
3671                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3672                 return -1;
3673         }
3674
3675         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3676                       new->brb_discard - old->brb_discard);
3677         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3678                       new->brb_truncate - old->brb_truncate);
3679
3680         UPDATE_STAT64_NIG(egress_mac_pkt0,
3681                                         etherstatspkts1024octetsto1522octets);
3682         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3683
3684         memcpy(old, new, sizeof(struct nig_stats));
3685
3686         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3687                sizeof(struct mac_stx));
3688         estats->brb_drop_hi = pstats->brb_drop_hi;
3689         estats->brb_drop_lo = pstats->brb_drop_lo;
3690
3691         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3692
3693         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3694         if (nig_timer_max != estats->nig_timer_max) {
3695                 estats->nig_timer_max = nig_timer_max;
3696                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3697         }
3698
3699         return 0;
3700 }
3701
3702 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3703 {
3704         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3705         struct tstorm_per_port_stats *tport =
3706                                         &stats->tstorm_common.port_statistics;
3707         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3708         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3709         int i;
3710
3711         memset(&(fstats->total_bytes_received_hi), 0,
3712                sizeof(struct host_func_stats) - 2*sizeof(u32));
3713         estats->error_bytes_received_hi = 0;
3714         estats->error_bytes_received_lo = 0;
3715         estats->etherstatsoverrsizepkts_hi = 0;
3716         estats->etherstatsoverrsizepkts_lo = 0;
3717         estats->no_buff_discard_hi = 0;
3718         estats->no_buff_discard_lo = 0;
3719
3720         for_each_queue(bp, i) {
3721                 struct bnx2x_fastpath *fp = &bp->fp[i];
3722                 int cl_id = fp->cl_id;
3723                 struct tstorm_per_client_stats *tclient =
3724                                 &stats->tstorm_common.client_statistics[cl_id];
3725                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3726                 struct ustorm_per_client_stats *uclient =
3727                                 &stats->ustorm_common.client_statistics[cl_id];
3728                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3729                 struct xstorm_per_client_stats *xclient =
3730                                 &stats->xstorm_common.client_statistics[cl_id];
3731                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3732                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3733                 u32 diff;
3734
3735                 /* are storm stats valid? */
3736                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3737                                                         bp->stats_counter) {
3738                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3739                            "  xstorm counter (%d) != stats_counter (%d)\n",
3740                            i, xclient->stats_counter, bp->stats_counter);
3741                         return -1;
3742                 }
3743                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3744                                                         bp->stats_counter) {
3745                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3746                            "  tstorm counter (%d) != stats_counter (%d)\n",
3747                            i, tclient->stats_counter, bp->stats_counter);
3748                         return -2;
3749                 }
3750                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3751                                                         bp->stats_counter) {
3752                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3753                            "  ustorm counter (%d) != stats_counter (%d)\n",
3754                            i, uclient->stats_counter, bp->stats_counter);
3755                         return -4;
3756                 }
3757
3758                 qstats->total_bytes_received_hi =
3759                 qstats->valid_bytes_received_hi =
3760                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3761                 qstats->total_bytes_received_lo =
3762                 qstats->valid_bytes_received_lo =
3763                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3764
3765                 qstats->error_bytes_received_hi =
3766                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3767                 qstats->error_bytes_received_lo =
3768                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3769
3770                 ADD_64(qstats->total_bytes_received_hi,
3771                        qstats->error_bytes_received_hi,
3772                        qstats->total_bytes_received_lo,
3773                        qstats->error_bytes_received_lo);
3774
3775                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3776                                         total_unicast_packets_received);
3777                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3778                                         total_multicast_packets_received);
3779                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3780                                         total_broadcast_packets_received);
3781                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3782                                         etherstatsoverrsizepkts);
3783                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3784
3785                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3786                                         total_unicast_packets_received);
3787                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3788                                         total_multicast_packets_received);
3789                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3790                                         total_broadcast_packets_received);
3791                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3792                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3793                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3794
3795                 qstats->total_bytes_transmitted_hi =
3796                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3797                 qstats->total_bytes_transmitted_lo =
3798                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3799
3800                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3801                                         total_unicast_packets_transmitted);
3802                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3803                                         total_multicast_packets_transmitted);
3804                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3805                                         total_broadcast_packets_transmitted);
3806
3807                 old_tclient->checksum_discard = tclient->checksum_discard;
3808                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3809
3810                 ADD_64(fstats->total_bytes_received_hi,
3811                        qstats->total_bytes_received_hi,
3812                        fstats->total_bytes_received_lo,
3813                        qstats->total_bytes_received_lo);
3814                 ADD_64(fstats->total_bytes_transmitted_hi,
3815                        qstats->total_bytes_transmitted_hi,
3816                        fstats->total_bytes_transmitted_lo,
3817                        qstats->total_bytes_transmitted_lo);
3818                 ADD_64(fstats->total_unicast_packets_received_hi,
3819                        qstats->total_unicast_packets_received_hi,
3820                        fstats->total_unicast_packets_received_lo,
3821                        qstats->total_unicast_packets_received_lo);
3822                 ADD_64(fstats->total_multicast_packets_received_hi,
3823                        qstats->total_multicast_packets_received_hi,
3824                        fstats->total_multicast_packets_received_lo,
3825                        qstats->total_multicast_packets_received_lo);
3826                 ADD_64(fstats->total_broadcast_packets_received_hi,
3827                        qstats->total_broadcast_packets_received_hi,
3828                        fstats->total_broadcast_packets_received_lo,
3829                        qstats->total_broadcast_packets_received_lo);
3830                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3831                        qstats->total_unicast_packets_transmitted_hi,
3832                        fstats->total_unicast_packets_transmitted_lo,
3833                        qstats->total_unicast_packets_transmitted_lo);
3834                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3835                        qstats->total_multicast_packets_transmitted_hi,
3836                        fstats->total_multicast_packets_transmitted_lo,
3837                        qstats->total_multicast_packets_transmitted_lo);
3838                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3839                        qstats->total_broadcast_packets_transmitted_hi,
3840                        fstats->total_broadcast_packets_transmitted_lo,
3841                        qstats->total_broadcast_packets_transmitted_lo);
3842                 ADD_64(fstats->valid_bytes_received_hi,
3843                        qstats->valid_bytes_received_hi,
3844                        fstats->valid_bytes_received_lo,
3845                        qstats->valid_bytes_received_lo);
3846
3847                 ADD_64(estats->error_bytes_received_hi,
3848                        qstats->error_bytes_received_hi,
3849                        estats->error_bytes_received_lo,
3850                        qstats->error_bytes_received_lo);
3851                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3852                        qstats->etherstatsoverrsizepkts_hi,
3853                        estats->etherstatsoverrsizepkts_lo,
3854                        qstats->etherstatsoverrsizepkts_lo);
3855                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3856                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3857         }
3858
3859         ADD_64(fstats->total_bytes_received_hi,
3860                estats->rx_stat_ifhcinbadoctets_hi,
3861                fstats->total_bytes_received_lo,
3862                estats->rx_stat_ifhcinbadoctets_lo);
3863
3864         memcpy(estats, &(fstats->total_bytes_received_hi),
3865                sizeof(struct host_func_stats) - 2*sizeof(u32));
3866
3867         ADD_64(estats->etherstatsoverrsizepkts_hi,
3868                estats->rx_stat_dot3statsframestoolong_hi,
3869                estats->etherstatsoverrsizepkts_lo,
3870                estats->rx_stat_dot3statsframestoolong_lo);
3871         ADD_64(estats->error_bytes_received_hi,
3872                estats->rx_stat_ifhcinbadoctets_hi,
3873                estats->error_bytes_received_lo,
3874                estats->rx_stat_ifhcinbadoctets_lo);
3875
3876         if (bp->port.pmf) {
3877                 estats->mac_filter_discard =
3878                                 le32_to_cpu(tport->mac_filter_discard);
3879                 estats->xxoverflow_discard =
3880                                 le32_to_cpu(tport->xxoverflow_discard);
3881                 estats->brb_truncate_discard =
3882                                 le32_to_cpu(tport->brb_truncate_discard);
3883                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3884         }
3885
3886         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3887
3888         bp->stats_pending = 0;
3889
3890         return 0;
3891 }
3892
3893 static void bnx2x_net_stats_update(struct bnx2x *bp)
3894 {
3895         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3896         struct net_device_stats *nstats = &bp->dev->stats;
3897         int i;
3898
3899         nstats->rx_packets =
3900                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3901                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3902                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3903
3904         nstats->tx_packets =
3905                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3906                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3907                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3908
3909         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3910
3911         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3912
3913         nstats->rx_dropped = estats->mac_discard;
3914         for_each_queue(bp, i)
3915                 nstats->rx_dropped +=
3916                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3917
3918         nstats->tx_dropped = 0;
3919
3920         nstats->multicast =
3921                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3922
3923         nstats->collisions =
3924                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3925
3926         nstats->rx_length_errors =
3927                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3928                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3929         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3930                                  bnx2x_hilo(&estats->brb_truncate_hi);
3931         nstats->rx_crc_errors =
3932                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3933         nstats->rx_frame_errors =
3934                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3935         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3936         nstats->rx_missed_errors = estats->xxoverflow_discard;
3937
3938         nstats->rx_errors = nstats->rx_length_errors +
3939                             nstats->rx_over_errors +
3940                             nstats->rx_crc_errors +
3941                             nstats->rx_frame_errors +
3942                             nstats->rx_fifo_errors +
3943                             nstats->rx_missed_errors;
3944
3945         nstats->tx_aborted_errors =
3946                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3947                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3948         nstats->tx_carrier_errors =
3949                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3950         nstats->tx_fifo_errors = 0;
3951         nstats->tx_heartbeat_errors = 0;
3952         nstats->tx_window_errors = 0;
3953
3954         nstats->tx_errors = nstats->tx_aborted_errors +
3955                             nstats->tx_carrier_errors +
3956             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3957 }
3958
3959 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3960 {
3961         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3962         int i;
3963
3964         estats->driver_xoff = 0;
3965         estats->rx_err_discard_pkt = 0;
3966         estats->rx_skb_alloc_failed = 0;
3967         estats->hw_csum_err = 0;
3968         for_each_queue(bp, i) {
3969                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3970
3971                 estats->driver_xoff += qstats->driver_xoff;
3972                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3973                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3974                 estats->hw_csum_err += qstats->hw_csum_err;
3975         }
3976 }
3977
3978 static void bnx2x_stats_update(struct bnx2x *bp)
3979 {
3980         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3981
3982         if (*stats_comp != DMAE_COMP_VAL)
3983                 return;
3984
3985         if (bp->port.pmf)
3986                 bnx2x_hw_stats_update(bp);
3987
3988         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3989                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3990                 bnx2x_panic();
3991                 return;
3992         }
3993
3994         bnx2x_net_stats_update(bp);
3995         bnx2x_drv_stats_update(bp);
3996
3997         if (bp->msglevel & NETIF_MSG_TIMER) {
3998                 struct tstorm_per_client_stats *old_tclient =
3999                                                         &bp->fp->old_tclient;
4000                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4001                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4002                 struct net_device_stats *nstats = &bp->dev->stats;
4003                 int i;
4004
4005                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4006                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4007                                   "  tx pkt (%lx)\n",
4008                        bnx2x_tx_avail(bp->fp),
4009                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4010                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4011                                   "  rx pkt (%lx)\n",
4012                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4013                              bp->fp->rx_comp_cons),
4014                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4015                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4016                                   "brb truncate %u\n",
4017                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4018                        qstats->driver_xoff,
4019                        estats->brb_drop_lo, estats->brb_truncate_lo);
4020                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4021                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4022                         "mac_discard %u  mac_filter_discard %u  "
4023                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4024                         "ttl0_discard %u\n",
4025                        le32_to_cpu(old_tclient->checksum_discard),
4026                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4027                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4028                        estats->mac_discard, estats->mac_filter_discard,
4029                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4030                        le32_to_cpu(old_tclient->ttl0_discard));
4031
4032                 for_each_queue(bp, i) {
4033                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4034                                bnx2x_fp(bp, i, tx_pkt),
4035                                bnx2x_fp(bp, i, rx_pkt),
4036                                bnx2x_fp(bp, i, rx_calls));
4037                 }
4038         }
4039
4040         bnx2x_hw_stats_post(bp);
4041         bnx2x_storm_stats_post(bp);
4042 }
4043
4044 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4045 {
4046         struct dmae_command *dmae;
4047         u32 opcode;
4048         int loader_idx = PMF_DMAE_C(bp);
4049         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4050
4051         bp->executer_idx = 0;
4052
4053         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4054                   DMAE_CMD_C_ENABLE |
4055                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4056 #ifdef __BIG_ENDIAN
4057                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4058 #else
4059                   DMAE_CMD_ENDIANITY_DW_SWAP |
4060 #endif
4061                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4062                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4063
4064         if (bp->port.port_stx) {
4065
4066                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4067                 if (bp->func_stx)
4068                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4069                 else
4070                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4071                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4072                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4073                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4074                 dmae->dst_addr_hi = 0;
4075                 dmae->len = sizeof(struct host_port_stats) >> 2;
4076                 if (bp->func_stx) {
4077                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4078                         dmae->comp_addr_hi = 0;
4079                         dmae->comp_val = 1;
4080                 } else {
4081                         dmae->comp_addr_lo =
4082                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4083                         dmae->comp_addr_hi =
4084                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4085                         dmae->comp_val = DMAE_COMP_VAL;
4086
4087                         *stats_comp = 0;
4088                 }
4089         }
4090
4091         if (bp->func_stx) {
4092
4093                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4095                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4096                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4097                 dmae->dst_addr_lo = bp->func_stx >> 2;
4098                 dmae->dst_addr_hi = 0;
4099                 dmae->len = sizeof(struct host_func_stats) >> 2;
4100                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4101                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4102                 dmae->comp_val = DMAE_COMP_VAL;
4103
4104                 *stats_comp = 0;
4105         }
4106 }
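/* Note on the completion scheme used above: a DMAE command can either
 * chain into the next command by writing comp_val = 1 into the loader
 * GO register (dmae_reg_go_c[loader_idx]), or terminate by writing
 * DMAE_COMP_VAL into the stats_comp word in host memory.  stats_comp is
 * zeroed before the command is posted, so the stats code can later tell
 * whether the copy finished by comparing it against DMAE_COMP_VAL (see
 * the early return at the top of bnx2x_stats_update()).
 */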
4107
4108 static void bnx2x_stats_stop(struct bnx2x *bp)
4109 {
4110         int update = 0;
4111
4112         bnx2x_stats_comp(bp);
4113
4114         if (bp->port.pmf)
4115                 update = (bnx2x_hw_stats_update(bp) == 0);
4116
4117         update |= (bnx2x_storm_stats_update(bp) == 0);
4118
4119         if (update) {
4120                 bnx2x_net_stats_update(bp);
4121
4122                 if (bp->port.pmf)
4123                         bnx2x_port_stats_stop(bp);
4124
4125                 bnx2x_hw_stats_post(bp);
4126                 bnx2x_stats_comp(bp);
4127         }
4128 }
4129
4130 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4131 {
4132 }
4133
4134 static const struct {
4135         void (*action)(struct bnx2x *bp);
4136         enum bnx2x_stats_state next_state;
4137 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4138 /* state        event   */
4139 {
4140 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4141 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4142 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4143 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4144 },
4145 {
4146 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4147 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4148 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4149 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4150 }
4151 };
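/* Example walk through the table above: while the interface is up the
 * driver sits in STATS_STATE_ENABLED, so a STATS_EVENT_UPDATE (posted
 * from bnx2x_timer) runs bnx2x_stats_update and stays in ENABLED, while
 * a STATS_EVENT_STOP runs bnx2x_stats_stop and moves to DISABLED.  In
 * DISABLED further UPDATE events hit bnx2x_stats_do_nothing until a
 * LINK_UP event (bnx2x_stats_start) re-enables collection.
 */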
4152
4153 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4154 {
4155         enum bnx2x_stats_state state = bp->stats_state;
4156
4157         bnx2x_stats_stm[state][event].action(bp);
4158         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4159
4160         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4161                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4162                    state, event, bp->stats_state);
4163 }
4164
4165 static void bnx2x_timer(unsigned long data)
4166 {
4167         struct bnx2x *bp = (struct bnx2x *) data;
4168
4169         if (!netif_running(bp->dev))
4170                 return;
4171
4172         if (atomic_read(&bp->intr_sem) != 0)
4173                 goto timer_restart;
4174
4175         if (poll) {
4176                 struct bnx2x_fastpath *fp = &bp->fp[0];
4177                 int rc;
4178
4179                 bnx2x_tx_int(fp, 1000);
4180                 rc = bnx2x_rx_int(fp, 1000);
4181         }
4182
4183         if (!BP_NOMCP(bp)) {
4184                 int func = BP_FUNC(bp);
4185                 u32 drv_pulse;
4186                 u32 mcp_pulse;
4187
4188                 ++bp->fw_drv_pulse_wr_seq;
4189                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4190                 /* TBD - add SYSTEM_TIME */
4191                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4192                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4193
4194                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4195                              MCP_PULSE_SEQ_MASK);
4196                 /* The delta between driver pulse and mcp response
4197                  * should be 1 (before mcp response) or 0 (after mcp response)
4198                  */
4199                 if ((drv_pulse != mcp_pulse) &&
4200                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4201                         /* someone lost a heartbeat... */
4202                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4203                                   drv_pulse, mcp_pulse);
4204                 }
4205         }
4206
4207         if ((bp->state == BNX2X_STATE_OPEN) ||
4208             (bp->state == BNX2X_STATE_DISABLED))
4209                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4210
4211 timer_restart:
4212         mod_timer(&bp->timer, jiffies + bp->current_interval);
4213 }
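/* Heartbeat example (mask width illustrative): if the driver has just
 * written drv_pulse = 0x0000 after wrapping and the MCP still echoes
 * the previous value, drv_pulse == (mcp_pulse + 1) & MCP_PULSE_SEQ_MASK
 * holds and no error is logged; only a modular delta of 2 or more -
 * i.e. one side stopped touching its mailbox - triggers the
 * BNX2X_ERR() above.
 */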
4214
4215 /* end of Statistics */
4216
4217 /* nic init */
4218
4219 /*
4220  * nic init service functions
4221  */
4222
4223 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4224 {
4225         int port = BP_PORT(bp);
4226
4227         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4228                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4229                         sizeof(struct ustorm_status_block)/4);
4230         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4231                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4232                         sizeof(struct cstorm_status_block)/4);
4233 }
4234
4235 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4236                           dma_addr_t mapping, int sb_id)
4237 {
4238         int port = BP_PORT(bp);
4239         int func = BP_FUNC(bp);
4240         int index;
4241         u64 section;
4242
4243         /* USTORM */
4244         section = ((u64)mapping) + offsetof(struct host_status_block,
4245                                             u_status_block);
4246         sb->u_status_block.status_block_id = sb_id;
4247
4248         REG_WR(bp, BAR_USTRORM_INTMEM +
4249                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4250         REG_WR(bp, BAR_USTRORM_INTMEM +
4251                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4252                U64_HI(section));
4253         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4254                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4255
4256         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4257                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4258                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4259
4260         /* CSTORM */
4261         section = ((u64)mapping) + offsetof(struct host_status_block,
4262                                             c_status_block);
4263         sb->c_status_block.status_block_id = sb_id;
4264
4265         REG_WR(bp, BAR_CSTRORM_INTMEM +
4266                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4267         REG_WR(bp, BAR_CSTRORM_INTMEM +
4268                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4269                U64_HI(section));
4270         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4271                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4272
4273         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4274                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4275                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4276
4277         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4278 }
4279
4280 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4281 {
4282         int func = BP_FUNC(bp);
4283
4284         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4285                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4286                         sizeof(struct ustorm_def_status_block)/4);
4287         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4288                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4289                         sizeof(struct cstorm_def_status_block)/4);
4290         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4291                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4292                         sizeof(struct xstorm_def_status_block)/4);
4293         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4294                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4295                         sizeof(struct tstorm_def_status_block)/4);
4296 }
4297
4298 static void bnx2x_init_def_sb(struct bnx2x *bp,
4299                               struct host_def_status_block *def_sb,
4300                               dma_addr_t mapping, int sb_id)
4301 {
4302         int port = BP_PORT(bp);
4303         int func = BP_FUNC(bp);
4304         int index, val, reg_offset;
4305         u64 section;
4306
4307         /* ATTN */
4308         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4309                                             atten_status_block);
4310         def_sb->atten_status_block.status_block_id = sb_id;
4311
4312         bp->attn_state = 0;
4313
4314         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4315                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4316
4317         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4318                 bp->attn_group[index].sig[0] = REG_RD(bp,
4319                                                      reg_offset + 0x10*index);
4320                 bp->attn_group[index].sig[1] = REG_RD(bp,
4321                                                reg_offset + 0x4 + 0x10*index);
4322                 bp->attn_group[index].sig[2] = REG_RD(bp,
4323                                                reg_offset + 0x8 + 0x10*index);
4324                 bp->attn_group[index].sig[3] = REG_RD(bp,
4325                                                reg_offset + 0xc + 0x10*index);
4326         }
4327
4328         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4329                              HC_REG_ATTN_MSG0_ADDR_L);
4330
4331         REG_WR(bp, reg_offset, U64_LO(section));
4332         REG_WR(bp, reg_offset + 4, U64_HI(section));
4333
4334         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4335
4336         val = REG_RD(bp, reg_offset);
4337         val |= sb_id;
4338         REG_WR(bp, reg_offset, val);
4339
4340         /* USTORM */
4341         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4342                                             u_def_status_block);
4343         def_sb->u_def_status_block.status_block_id = sb_id;
4344
4345         REG_WR(bp, BAR_USTRORM_INTMEM +
4346                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4347         REG_WR(bp, BAR_USTRORM_INTMEM +
4348                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4349                U64_HI(section));
4350         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4351                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4352
4353         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4354                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4355                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4356
4357         /* CSTORM */
4358         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4359                                             c_def_status_block);
4360         def_sb->c_def_status_block.status_block_id = sb_id;
4361
4362         REG_WR(bp, BAR_CSTRORM_INTMEM +
4363                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4364         REG_WR(bp, BAR_CSTRORM_INTMEM +
4365                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4366                U64_HI(section));
4367         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4368                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4369
4370         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4371                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4372                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4373
4374         /* TSTORM */
4375         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4376                                             t_def_status_block);
4377         def_sb->t_def_status_block.status_block_id = sb_id;
4378
4379         REG_WR(bp, BAR_TSTRORM_INTMEM +
4380                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4381         REG_WR(bp, BAR_TSTRORM_INTMEM +
4382                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4383                U64_HI(section));
4384         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4385                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4386
4387         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4388                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4389                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4390
4391         /* XSTORM */
4392         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4393                                             x_def_status_block);
4394         def_sb->x_def_status_block.status_block_id = sb_id;
4395
4396         REG_WR(bp, BAR_XSTRORM_INTMEM +
4397                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4398         REG_WR(bp, BAR_XSTRORM_INTMEM +
4399                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4400                U64_HI(section));
4401         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4402                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4403
4404         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4405                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4406                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4407
4408         bp->stats_pending = 0;
4409         bp->set_mac_pending = 0;
4410
4411         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4412 }
4413
4414 static void bnx2x_update_coalesce(struct bnx2x *bp)
4415 {
4416         int port = BP_PORT(bp);
4417         int i;
4418
4419         for_each_queue(bp, i) {
4420                 int sb_id = bp->fp[i].sb_id;
4421
4422                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4423                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4424                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4425                                                     U_SB_ETH_RX_CQ_INDEX),
4426                         bp->rx_ticks/12);
4427                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4428                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4429                                                      U_SB_ETH_RX_CQ_INDEX),
4430                          bp->rx_ticks ? 0 : 1);
4431
4432                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4433                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4434                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4435                                                     C_SB_ETH_TX_CQ_INDEX),
4436                         bp->tx_ticks/12);
4437                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4438                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4439                                                      C_SB_ETH_TX_CQ_INDEX),
4440                          bp->tx_ticks ? 0 : 1);
4441         }
4442 }
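/* Coalescing example: rx_ticks/tx_ticks are kept in microseconds and
 * the /12 above suggests the HC timeout registers count in ~12us units,
 * so e.g. rx_ticks = 48 is programmed as 4.  Since a timeout of 0 is
 * not expressible, zero ticks instead disables coalescing for that
 * index via the HC_DISABLE offset (the "ticks ? 0 : 1" term).
 */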
4443
4444 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4445                                        struct bnx2x_fastpath *fp, int last)
4446 {
4447         int i;
4448
4449         for (i = 0; i < last; i++) {
4450                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4451                 struct sk_buff *skb = rx_buf->skb;
4452
4453                 if (skb == NULL) {
4454                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4455                         continue;
4456                 }
4457
4458                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4459                         pci_unmap_single(bp->pdev,
4460                                          pci_unmap_addr(rx_buf, mapping),
4461                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4462
4463                 dev_kfree_skb(skb);
4464                 rx_buf->skb = NULL;
4465         }
4466 }
4467
4468 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4469 {
4470         int func = BP_FUNC(bp);
4471         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4472                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4473         u16 ring_prod, cqe_ring_prod;
4474         int i, j;
4475
4476         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4477         DP(NETIF_MSG_IFUP,
4478            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4479
4480         if (bp->flags & TPA_ENABLE_FLAG) {
4481
4482                 for_each_rx_queue(bp, j) {
4483                         struct bnx2x_fastpath *fp = &bp->fp[j];
4484
4485                         for (i = 0; i < max_agg_queues; i++) {
4486                                 fp->tpa_pool[i].skb =
4487                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4488                                 if (!fp->tpa_pool[i].skb) {
4489                                         BNX2X_ERR("Failed to allocate TPA "
4490                                                   "skb pool for queue[%d] - "
4491                                                   "disabling TPA on this "
4492                                                   "queue!\n", j);
4493                                         bnx2x_free_tpa_pool(bp, fp, i);
4494                                         fp->disable_tpa = 1;
4495                                         break;
4496                                 }
4497                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4498                                                    mapping, 0);
4500                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4501                         }
4502                 }
4503         }
4504
4505         for_each_rx_queue(bp, j) {
4506                 struct bnx2x_fastpath *fp = &bp->fp[j];
4507
4508                 fp->rx_bd_cons = 0;
4509                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4510                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4511
4512                 /* "next page" elements initialization */
4513                 /* SGE ring */
4514                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4515                         struct eth_rx_sge *sge;
4516
4517                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4518                         sge->addr_hi =
4519                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4520                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4521                         sge->addr_lo =
4522                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4523                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4524                 }
4525
4526                 bnx2x_init_sge_ring_bit_mask(fp);
4527
4528                 /* RX BD ring */
4529                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4530                         struct eth_rx_bd *rx_bd;
4531
4532                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4533                         rx_bd->addr_hi =
4534                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4535                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4536                         rx_bd->addr_lo =
4537                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4538                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4539                 }
4540
4541                 /* CQ ring */
4542                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4543                         struct eth_rx_cqe_next_page *nextpg;
4544
4545                         nextpg = (struct eth_rx_cqe_next_page *)
4546                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4547                         nextpg->addr_hi =
4548                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4549                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4550                         nextpg->addr_lo =
4551                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4552                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4553                 }
4554
4555                 /* Allocate SGEs and initialize the ring elements */
4556                 for (i = 0, ring_prod = 0;
4557                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4558
4559                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4560                                 BNX2X_ERR("was only able to allocate "
4561                                           "%d rx sges\n", i);
4562                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4563                                 /* Cleanup already allocated elements */
4564                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4565                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4566                                 fp->disable_tpa = 1;
4567                                 ring_prod = 0;
4568                                 break;
4569                         }
4570                         ring_prod = NEXT_SGE_IDX(ring_prod);
4571                 }
4572                 fp->rx_sge_prod = ring_prod;
4573
4574                 /* Allocate BDs and initialize BD ring */
4575                 fp->rx_comp_cons = 0;
4576                 cqe_ring_prod = ring_prod = 0;
4577                 for (i = 0; i < bp->rx_ring_size; i++) {
4578                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4579                                 BNX2X_ERR("was only able to allocate "
4580                                           "%d rx skbs on queue[%d]\n", i, j);
4581                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4582                                 break;
4583                         }
4584                         ring_prod = NEXT_RX_IDX(ring_prod);
4585                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4586                         WARN_ON(ring_prod <= i);
4587                 }
4588
4589                 fp->rx_bd_prod = ring_prod;
4590                 /* must not have more available CQEs than BDs */
4591                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4592                                        cqe_ring_prod);
4593                 fp->rx_pkt = fp->rx_calls = 0;
4594
4595                 /* Warning!
4596                  * This will generate an interrupt (to the TSTORM);
4597                  * it must only be done after the chip is initialized.
4598                  */
4599                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4600                                      fp->rx_sge_prod);
4601                 if (j != 0)
4602                         continue;
4603
4604                 REG_WR(bp, BAR_USTRORM_INTMEM +
4605                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4606                        U64_LO(fp->rx_comp_mapping));
4607                 REG_WR(bp, BAR_USTRORM_INTMEM +
4608                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4609                        U64_HI(fp->rx_comp_mapping));
4610         }
4611 }
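/* Ring chaining note: each rx ring (BD, CQ, SGE) is a circular chain of
 * BCM_PAGE_SIZE pages, and the loops above turn the last slot(s) of
 * every page into a "next page" pointer.  E.g. for the BD ring, slot
 * RX_DESC_CNT * i - 2 holds the DMA address of page (i % NUM_RX_RINGS),
 * so page NUM_RX_RINGS - 1 points back at page 0 and the producer never
 * has to special-case a page boundary.  The TX BD ring below is chained
 * the same way.
 */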
4612
4613 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4614 {
4615         int i, j;
4616
4617         for_each_tx_queue(bp, j) {
4618                 struct bnx2x_fastpath *fp = &bp->fp[j];
4619
4620                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4621                         struct eth_tx_bd *tx_bd =
4622                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4623
4624                         tx_bd->addr_hi =
4625                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4626                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4627                         tx_bd->addr_lo =
4628                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4629                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4630                 }
4631
4632                 fp->tx_pkt_prod = 0;
4633                 fp->tx_pkt_cons = 0;
4634                 fp->tx_bd_prod = 0;
4635                 fp->tx_bd_cons = 0;
4636                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4637                 fp->tx_pkt = 0;
4638         }
4639 }
4640
4641 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4642 {
4643         int func = BP_FUNC(bp);
4644
4645         spin_lock_init(&bp->spq_lock);
4646
4647         bp->spq_left = MAX_SPQ_PENDING;
4648         bp->spq_prod_idx = 0;
4649         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4650         bp->spq_prod_bd = bp->spq;
4651         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4652
4653         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4654                U64_LO(bp->spq_mapping));
4655         REG_WR(bp,
4656                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4657                U64_HI(bp->spq_mapping));
4658
4659         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4660                bp->spq_prod_idx);
4661 }
4662
4663 static void bnx2x_init_context(struct bnx2x *bp)
4664 {
4665         int i;
4666
4667         for_each_queue(bp, i) {
4668                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4669                 struct bnx2x_fastpath *fp = &bp->fp[i];
4670                 u8 cl_id = fp->cl_id;
4671                 u8 sb_id = fp->sb_id;
4672
4673                 context->ustorm_st_context.common.sb_index_numbers =
4674                                                 BNX2X_RX_SB_INDEX_NUM;
4675                 context->ustorm_st_context.common.clientId = cl_id;
4676                 context->ustorm_st_context.common.status_block_id = sb_id;
4677                 context->ustorm_st_context.common.flags =
4678                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4679                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4680                 context->ustorm_st_context.common.statistics_counter_id =
4681                                                 cl_id;
4682                 context->ustorm_st_context.common.mc_alignment_log_size =
4683                                                 BNX2X_RX_ALIGN_SHIFT;
4684                 context->ustorm_st_context.common.bd_buff_size =
4685                                                 bp->rx_buf_size;
4686                 context->ustorm_st_context.common.bd_page_base_hi =
4687                                                 U64_HI(fp->rx_desc_mapping);
4688                 context->ustorm_st_context.common.bd_page_base_lo =
4689                                                 U64_LO(fp->rx_desc_mapping);
4690                 if (!fp->disable_tpa) {
4691                         context->ustorm_st_context.common.flags |=
4692                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4693                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4694                         context->ustorm_st_context.common.sge_buff_size =
4695                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4696                                          (u32)0xffff);
4697                         context->ustorm_st_context.common.sge_page_base_hi =
4698                                                 U64_HI(fp->rx_sge_mapping);
4699                         context->ustorm_st_context.common.sge_page_base_lo =
4700                                                 U64_LO(fp->rx_sge_mapping);
4701                 }
4702
4703                 context->ustorm_ag_context.cdu_usage =
4704                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4705                                                CDU_REGION_NUMBER_UCM_AG,
4706                                                ETH_CONNECTION_TYPE);
4707
4708                 context->xstorm_st_context.tx_bd_page_base_hi =
4709                                                 U64_HI(fp->tx_desc_mapping);
4710                 context->xstorm_st_context.tx_bd_page_base_lo =
4711                                                 U64_LO(fp->tx_desc_mapping);
4712                 context->xstorm_st_context.db_data_addr_hi =
4713                                                 U64_HI(fp->tx_prods_mapping);
4714                 context->xstorm_st_context.db_data_addr_lo =
4715                                                 U64_LO(fp->tx_prods_mapping);
4716                 context->xstorm_st_context.statistics_data = (cl_id |
4717                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4718                 context->cstorm_st_context.sb_index_number =
4719                                                 C_SB_ETH_TX_CQ_INDEX;
4720                 context->cstorm_st_context.status_block_id = sb_id;
4721
4722                 context->xstorm_ag_context.cdu_reserved =
4723                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4724                                                CDU_REGION_NUMBER_XCM_AG,
4725                                                ETH_CONNECTION_TYPE);
4726         }
4727 }
4728
4729 static void bnx2x_init_ind_table(struct bnx2x *bp)
4730 {
4731         int func = BP_FUNC(bp);
4732         int i;
4733
4734         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4735                 return;
4736
4737         DP(NETIF_MSG_IFUP,
4738            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4739         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4740                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4741                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4742                         bp->fp->cl_id + (i % bp->num_rx_queues));
4743 }
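/* Indirection example: with num_rx_queues = 4 and leading client id
 * cl_id, the TSTORM_INDIRECTION_TABLE_SIZE entries written above repeat
 * the pattern cl_id, cl_id+1, cl_id+2, cl_id+3, spreading the RSS hash
 * buckets round-robin across the rx queues.
 */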
4744
4745 static void bnx2x_set_client_config(struct bnx2x *bp)
4746 {
4747         struct tstorm_eth_client_config tstorm_client = {0};
4748         int port = BP_PORT(bp);
4749         int i;
4750
4751         tstorm_client.mtu = bp->dev->mtu;
4752         tstorm_client.config_flags =
4753                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4754                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4755 #ifdef BCM_VLAN
4756         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4757                 tstorm_client.config_flags |=
4758                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4759                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4760         }
4761 #endif
4762
4763         if (bp->flags & TPA_ENABLE_FLAG) {
4764                 tstorm_client.max_sges_for_packet =
4765                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4766                 tstorm_client.max_sges_for_packet =
4767                         ((tstorm_client.max_sges_for_packet +
4768                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4769                         PAGES_PER_SGE_SHIFT;
4770
4771                 tstorm_client.config_flags |=
4772                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4773         }
4774
4775         for_each_queue(bp, i) {
4776                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4777
4778                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4779                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4780                        ((u32 *)&tstorm_client)[0]);
4781                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4782                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4783                        ((u32 *)&tstorm_client)[1]);
4784         }
4785
4786         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4787            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4788 }
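/* SGE sizing example (numbers illustrative): with a 9000 byte MTU and
 * 4K SGE pages, SGE_PAGE_ALIGN(mtu) >> SGE_PAGE_SHIFT gives 3 pages;
 * if PAGES_PER_SGE were 2, the second assignment rounds 3 up to 4 pages
 * and reports 2 SGE entries, i.e. the worst-case number of SGEs one
 * aggregated packet may consume (each SGE spans PAGES_PER_SGE pages).
 */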
4789
4790 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4791 {
4792         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4793         int mode = bp->rx_mode;
4794         int mask = (1 << BP_L_ID(bp));
4795         int func = BP_FUNC(bp);
4796         int i;
4797
4798         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4799
4800         switch (mode) {
4801         case BNX2X_RX_MODE_NONE: /* no Rx */
4802                 tstorm_mac_filter.ucast_drop_all = mask;
4803                 tstorm_mac_filter.mcast_drop_all = mask;
4804                 tstorm_mac_filter.bcast_drop_all = mask;
4805                 break;
4806
4807         case BNX2X_RX_MODE_NORMAL:
4808                 tstorm_mac_filter.bcast_accept_all = mask;
4809                 break;
4810
4811         case BNX2X_RX_MODE_ALLMULTI:
4812                 tstorm_mac_filter.mcast_accept_all = mask;
4813                 tstorm_mac_filter.bcast_accept_all = mask;
4814                 break;
4815
4816         case BNX2X_RX_MODE_PROMISC:
4817                 tstorm_mac_filter.ucast_accept_all = mask;
4818                 tstorm_mac_filter.mcast_accept_all = mask;
4819                 tstorm_mac_filter.bcast_accept_all = mask;
4820                 break;
4821
4822         default:
4823                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4824                 break;
4825         }
4826
4827         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4828                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4829                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4830                        ((u32 *)&tstorm_mac_filter)[i]);
4831
4832 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4833                    ((u32 *)&tstorm_mac_filter)[i]); */
4834         }
4835
4836         if (mode != BNX2X_RX_MODE_NONE)
4837                 bnx2x_set_client_config(bp);
4838 }
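/* Rx-mode note: mask is the bit of this function's leading client
 * (1 << BP_L_ID(bp)) inside each accept_all/drop_all field, so several
 * functions sharing a port can each select their own filtering mode.
 * NORMAL only force-accepts broadcasts and leaves unicast/multicast to
 * the regular MAC filters, while PROMISC force-accepts everything.
 */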
4839
4840 static void bnx2x_init_internal_common(struct bnx2x *bp)
4841 {
4842         int i;
4843
4844         if (bp->flags & TPA_ENABLE_FLAG) {
4845                 struct tstorm_eth_tpa_exist tpa = {0};
4846
4847                 tpa.tpa_exist = 1;
4848
4849                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4850                        ((u32 *)&tpa)[0]);
4851                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4852                        ((u32 *)&tpa)[1]);
4853         }
4854
4855         /* Zero this manually as its initialization is
4856            currently missing in the initTool */
4857         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4858                 REG_WR(bp, BAR_USTRORM_INTMEM +
4859                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4860 }
4861
4862 static void bnx2x_init_internal_port(struct bnx2x *bp)
4863 {
4864         int port = BP_PORT(bp);
4865
4866         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4869         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4870 }
4871
4872 /* Calculates the sum of vn_min_rates.
4873    It's needed for further normalizing of the min_rates.
4874    Returns:
4875      sum of vn_min_rates.
4876        or
4877      0 - if all the min_rates are 0.
4878      In the latter case the fairness algorithm should be deactivated.
4879      If not all min_rates are zero then those that are zero will be set to 1.
4880  */
4881 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4882 {
4883         int all_zero = 1;
4884         int port = BP_PORT(bp);
4885         int vn;
4886
4887         bp->vn_weight_sum = 0;
4888         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4889                 int func = 2*vn + port;
4890                 u32 vn_cfg =
4891                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4892                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4893                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4894
4895                 /* Skip hidden vns */
4896                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4897                         continue;
4898
4899                 /* If min rate is zero - set it to 1 */
4900                 if (!vn_min_rate)
4901                         vn_min_rate = DEF_MIN_RATE;
4902                 else
4903                         all_zero = 0;
4904
4905                 bp->vn_weight_sum += vn_min_rate;
4906         }
4907
4908         /* ... only if all min rates are zero - disable fairness */
4909         if (all_zero)
4910                 bp->vn_weight_sum = 0;
4911 }
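/* Weight-sum example: configured min rates of (0, 25, 0, 75) become
 * (DEF_MIN_RATE, 2500, DEF_MIN_RATE, 7500) after the *100 scaling and
 * the zero-bump above, and vn_weight_sum is their total.  Only when
 * every configured rate is zero is the sum forced back to 0, which
 * disables the fairness algorithm further down.
 */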
4912
4913 static void bnx2x_init_internal_func(struct bnx2x *bp)
4914 {
4915         struct tstorm_eth_function_common_config tstorm_config = {0};
4916         struct stats_indication_flags stats_flags = {0};
4917         int port = BP_PORT(bp);
4918         int func = BP_FUNC(bp);
4919         int i, j;
4920         u32 offset;
4921         u16 max_agg_size;
4922
4923         if (is_multi(bp)) {
4924                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4925                 tstorm_config.rss_result_mask = MULTI_MASK;
4926         }
4927         if (IS_E1HMF(bp))
4928                 tstorm_config.config_flags |=
4929                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4930
4931         tstorm_config.leading_client_id = BP_L_ID(bp);
4932
4933         REG_WR(bp, BAR_TSTRORM_INTMEM +
4934                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4935                (*(u32 *)&tstorm_config));
4936
4937         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4938         bnx2x_set_storm_rx_mode(bp);
4939
4940         for_each_queue(bp, i) {
4941                 u8 cl_id = bp->fp[i].cl_id;
4942
4943                 /* reset xstorm per client statistics */
4944                 offset = BAR_XSTRORM_INTMEM +
4945                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4946                 for (j = 0;
4947                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4948                         REG_WR(bp, offset + j*4, 0);
4949
4950                 /* reset tstorm per client statistics */
4951                 offset = BAR_TSTRORM_INTMEM +
4952                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953                 for (j = 0;
4954                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4955                         REG_WR(bp, offset + j*4, 0);
4956
4957                 /* reset ustorm per client statistics */
4958                 offset = BAR_USTRORM_INTMEM +
4959                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4960                 for (j = 0;
4961                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4962                         REG_WR(bp, offset + j*4, 0);
4963         }
4964
4965         /* Init statistics related context */
4966         stats_flags.collect_eth = 1;
4967
4968         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4969                ((u32 *)&stats_flags)[0]);
4970         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4971                ((u32 *)&stats_flags)[1]);
4972
4973         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4974                ((u32 *)&stats_flags)[0]);
4975         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4976                ((u32 *)&stats_flags)[1]);
4977
4978         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4979                ((u32 *)&stats_flags)[0]);
4980         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4981                ((u32 *)&stats_flags)[1]);
4982
4983         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4984                ((u32 *)&stats_flags)[0]);
4985         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4986                ((u32 *)&stats_flags)[1]);
4987
4988         REG_WR(bp, BAR_XSTRORM_INTMEM +
4989                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991         REG_WR(bp, BAR_XSTRORM_INTMEM +
4992                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4994
4995         REG_WR(bp, BAR_TSTRORM_INTMEM +
4996                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998         REG_WR(bp, BAR_TSTRORM_INTMEM +
4999                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002         REG_WR(bp, BAR_USTRORM_INTMEM +
5003                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005         REG_WR(bp, BAR_USTRORM_INTMEM +
5006                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5008
5009         if (CHIP_IS_E1H(bp)) {
5010                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5011                         IS_E1HMF(bp));
5012                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5013                         IS_E1HMF(bp));
5014                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5015                         IS_E1HMF(bp));
5016                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5017                         IS_E1HMF(bp));
5018
5019                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5020                          bp->e1hov);
5021         }
5022
5023         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5024         max_agg_size =
5025                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5026                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5027                     (u32)0xffff);
5028         for_each_rx_queue(bp, i) {
5029                 struct bnx2x_fastpath *fp = &bp->fp[i];
5030
5031                 REG_WR(bp, BAR_USTRORM_INTMEM +
5032                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5033                        U64_LO(fp->rx_comp_mapping));
5034                 REG_WR(bp, BAR_USTRORM_INTMEM +
5035                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5036                        U64_HI(fp->rx_comp_mapping));
5037
5038                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5039                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5040                          max_agg_size);
5041         }
5042
5043         /* dropless flow control */
5044         if (CHIP_IS_E1H(bp)) {
5045                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5046
5047                 rx_pause.bd_thr_low = 250;
5048                 rx_pause.cqe_thr_low = 250;
5049                 rx_pause.cos = 1;
5050                 rx_pause.sge_thr_low = 0;
5051                 rx_pause.bd_thr_high = 350;
5052                 rx_pause.cqe_thr_high = 350;
5053                 rx_pause.sge_thr_high = 0;
5054
5055                 for_each_rx_queue(bp, i) {
5056                         struct bnx2x_fastpath *fp = &bp->fp[i];
5057
5058                         if (!fp->disable_tpa) {
5059                                 rx_pause.sge_thr_low = 150;
5060                                 rx_pause.sge_thr_high = 250;
5061                         }
5062
5063
5064                         offset = BAR_USTRORM_INTMEM +
5065                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5066                                                                    fp->cl_id);
5067                         for (j = 0;
5068                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5069                              j++)
5070                                 REG_WR(bp, offset + j*4,
5071                                        ((u32 *)&rx_pause)[j]);
5072                 }
5073         }
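                /* The low/high threshold pairs above apparently give the
                 * pause logic some hysteresis: XOFF is asserted when the
                 * free BD/CQE/SGE count drops below *_thr_low and released
                 * only once it climbs back over *_thr_high, so the pause
                 * state does not flap around a single watermark.  The SGE
                 * thresholds are armed only when TPA is active on the queue.
                 */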
5074
5075         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5076
5077         /* Init rate shaping and fairness contexts */
5078         if (IS_E1HMF(bp)) {
5079                 int vn;
5080
5081                 /* During init there is no active link.
5082                    Until link is up, set the link rate to 10Gbps */
5083                 bp->link_vars.line_speed = SPEED_10000;
5084                 bnx2x_init_port_minmax(bp);
5085
5086                 bnx2x_calc_vn_weight_sum(bp);
5087
5088                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5089                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5090
5091                 /* Enable rate shaping and fairness */
5092                 bp->cmng.flags.cmng_enables =
5093                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5094                 if (bp->vn_weight_sum)
5095                         bp->cmng.flags.cmng_enables |=
5096                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5097                 else
5098                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5099                            "  fairness will be disabled\n");
5100         } else {
5101                 /* rate shaping and fairness are disabled */
5102                 DP(NETIF_MSG_IFUP,
5103                    "single function mode  minmax will be disabled\n");
5104         }
5105
5106
5107         /* Store it to internal memory */
5108         if (bp->port.pmf)
5109                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5110                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5111                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5112                                ((u32 *)(&bp->cmng))[i]);
5113 }
5114
5115 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5116 {
5117         switch (load_code) {
5118         case FW_MSG_CODE_DRV_LOAD_COMMON:
5119                 bnx2x_init_internal_common(bp);
5120                 /* no break */
5121
5122         case FW_MSG_CODE_DRV_LOAD_PORT:
5123                 bnx2x_init_internal_port(bp);
5124                 /* no break */
5125
5126         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5127                 bnx2x_init_internal_func(bp);
5128                 break;
5129
5130         default:
5131                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5132                 break;
5133         }
5134 }
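/* The fall-through in the switch above is deliberate: LOAD_COMMON
 * initializes common + port + function state, LOAD_PORT initializes
 * port + function, and LOAD_FUNCTION only the per-function part -
 * matching how much of the chip the MCP made this driver instance
 * responsible for.
 */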
5135
5136 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5137 {
5138         int i;
5139
5140         for_each_queue(bp, i) {
5141                 struct bnx2x_fastpath *fp = &bp->fp[i];
5142
5143                 fp->bp = bp;
5144                 fp->state = BNX2X_FP_STATE_CLOSED;
5145                 fp->index = i;
5146                 fp->cl_id = BP_L_ID(bp) + i;
5147                 fp->sb_id = fp->cl_id;
5148                 DP(NETIF_MSG_IFUP,
5149                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5150                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5151                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5152                               fp->sb_id);
5153                 bnx2x_update_fpsb_idx(fp);
5154         }
5155
5156         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5157                           DEF_SB_ID);
5158         bnx2x_update_dsb_idx(bp);
5159         bnx2x_update_coalesce(bp);
5160         bnx2x_init_rx_rings(bp);
5161         bnx2x_init_tx_ring(bp);
5162         bnx2x_init_sp_ring(bp);
5163         bnx2x_init_context(bp);
5164         bnx2x_init_internal(bp, load_code);
5165         bnx2x_init_ind_table(bp);
5166         bnx2x_stats_init(bp);
5167
5168         /* At this point, we are ready for interrupts */
5169         atomic_set(&bp->intr_sem, 0);
5170
5171         /* flush all before enabling interrupts */
5172         mb();
5173         mmiowb();
5174
5175         bnx2x_int_enable(bp);
5176 }
5177
5178 /* end of nic init */
5179
5180 /*
5181  * gzip service functions
5182  */
5183
5184 static int bnx2x_gunzip_init(struct bnx2x *bp)
5185 {
5186         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5187                                               &bp->gunzip_mapping);
5188         if (bp->gunzip_buf  == NULL)
5189                 goto gunzip_nomem1;
5190
5191         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5192         if (bp->strm  == NULL)
5193                 goto gunzip_nomem2;
5194
5195         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5196                                       GFP_KERNEL);
5197         if (bp->strm->workspace == NULL)
5198                 goto gunzip_nomem3;
5199
5200         return 0;
5201
5202 gunzip_nomem3:
5203         kfree(bp->strm);
5204         bp->strm = NULL;
5205
5206 gunzip_nomem2:
5207         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5208                             bp->gunzip_mapping);
5209         bp->gunzip_buf = NULL;
5210
5211 gunzip_nomem1:
5212         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5213                " decompression\n", bp->dev->name);
5214         return -ENOMEM;
5215 }
5216
5217 static void bnx2x_gunzip_end(struct bnx2x *bp)
5218 {
5219         kfree(bp->strm->workspace);
5220
5221         kfree(bp->strm);
5222         bp->strm = NULL;
5223
5224         if (bp->gunzip_buf) {
5225                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5226                                     bp->gunzip_mapping);
5227                 bp->gunzip_buf = NULL;
5228         }
5229 }
5230
5231 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5232 {
5233         int n, rc;
5234
5235         /* check gzip header */
5236         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5237                 return -EINVAL;
5238
5239         n = 10;
5240
5241 #define FNAME                           0x8
5242
5243         if (zbuf[3] & FNAME)
5244                 while ((zbuf[n++] != 0) && (n < len));
5245
5246         bp->strm->next_in = zbuf + n;
5247         bp->strm->avail_in = len - n;
5248         bp->strm->next_out = bp->gunzip_buf;
5249         bp->strm->avail_out = FW_BUF_SIZE;
5250
5251         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5252         if (rc != Z_OK)
5253                 return rc;
5254
5255         rc = zlib_inflate(bp->strm, Z_FINISH);
5256         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5257                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5258                        bp->dev->name, bp->strm->msg);
5259
5260         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5261         if (bp->gunzip_outlen & 0x3)
5262                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5263                                     " gunzip_outlen (%d) not aligned\n",
5264                        bp->dev->name, bp->gunzip_outlen);
5265         bp->gunzip_outlen >>= 2;
5266
5267         zlib_inflateEnd(bp->strm);
5268
5269         if (rc == Z_STREAM_END)
5270                 return 0;
5271
5272         return rc;
5273 }
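/* Gzip framing note: the firmware image starts with the fixed 10-byte
 * gzip header (magic 0x1f 0x8b, method 0x08 = Z_DEFLATED, flags, mtime,
 * xfl, os).  If the FNAME flag (bit 3) is set, a NUL-terminated original
 * file name follows and is skipped by the while loop above.  Inflate is
 * then started with negative windowBits (-MAX_WBITS) so zlib parses the
 * raw deflate stream instead of expecting a zlib header.
 */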
5274
5275 /* nic load/unload */
5276
5277 /*
5278  * General service functions
5279  */
5280
5281 /* send a NIG loopback debug packet */
5282 static void bnx2x_lb_pckt(struct bnx2x *bp)
5283 {
5284         u32 wb_write[3];
5285
5286         /* Ethernet source and destination addresses */
5287         wb_write[0] = 0x55555555;
5288         wb_write[1] = 0x55555555;
5289         wb_write[2] = 0x20;             /* SOP */
5290         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5291
5292         /* NON-IP protocol */
5293         wb_write[0] = 0x09000000;
5294         wb_write[1] = 0x55555555;
5295         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5296         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5297 }
5298
5299 /* some of the internal memories
5300  * are not directly readable from the driver;
5301  * to test them we send debug packets
5302  */
5303 static int bnx2x_int_mem_test(struct bnx2x *bp)
5304 {
5305         int factor;
5306         int count, i;
5307         u32 val = 0;
5308
5309         if (CHIP_REV_IS_FPGA(bp))
5310                 factor = 120;
5311         else if (CHIP_REV_IS_EMUL(bp))
5312                 factor = 200;
5313         else
5314                 factor = 1;
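        /* FPGA and emulation run far slower than real silicon, so the
         * poll budgets below (loop counts and msleep intervals) are
         * scaled by this factor */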
5315
5316         DP(NETIF_MSG_HW, "start part1\n");
5317
5318         /* Disable inputs of parser neighbor blocks */
5319         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5320         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5321         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5322         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5323
5324         /*  Write 0 to parser credits for CFC search request */
5325         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5326
5327         /* send Ethernet packet */
5328         bnx2x_lb_pckt(bp);
5329
5330         /* TODO: do I reset the NIG statistics? */
5331         /* Wait until NIG register shows 1 packet of size 0x10 */
5332         count = 1000 * factor;
5333         while (count) {
5334
5335                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5336                 val = *bnx2x_sp(bp, wb_data[0]);
5337                 if (val == 0x10)
5338                         break;
5339
5340                 msleep(10);
5341                 count--;
5342         }
5343         if (val != 0x10) {
5344                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5345                 return -1;
5346         }
5347
5348         /* Wait until PRS register shows 1 packet */
5349         count = 1000 * factor;
5350         while (count) {
5351                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5352                 if (val == 1)
5353                         break;
5354
5355                 msleep(10);
5356                 count--;
5357         }
5358         if (val != 0x1) {
5359                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5360                 return -2;
5361         }
5362
5363         /* Reset and init BRB, PRS */
5364         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5365         msleep(50);
5366         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5367         msleep(50);
5368         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5369         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5370
5371         DP(NETIF_MSG_HW, "part2\n");
5372
5373         /* Disable inputs of parser neighbor blocks */
5374         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5375         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5376         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5377         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5378
5379         /* Write 0 to parser credits for CFC search request */
5380         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5381
5382         /* send 10 Ethernet packets */
5383         for (i = 0; i < 10; i++)
5384                 bnx2x_lb_pckt(bp);
5385
5386         /* Wait until NIG register shows 10 + 1
5387            packets totalling 11*0x10 = 0xb0 octets */
5388         count = 1000 * factor;
5389         while (count) {
5390
5391                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5392                 val = *bnx2x_sp(bp, wb_data[0]);
5393                 if (val == 0xb0)
5394                         break;
5395
5396                 msleep(10);
5397                 count--;
5398         }
5399         if (val != 0xb0) {
5400                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5401                 return -3;
5402         }
5403
5404         /* Wait until PRS register shows 2 packets */
5405         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5406         if (val != 2)
5407                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5408
5409         /* Write 1 to parser credits for CFC search request */
5410         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5411
5412         /* Wait until PRS register shows 3 packets */
5413         msleep(10 * factor);
5415         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5416         if (val != 3)
5417                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5418
5419         /* clear NIG EOP FIFO */
5420         for (i = 0; i < 11; i++)
5421                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5422         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5423         if (val != 1) {
5424                 BNX2X_ERR("clear of NIG failed\n");
5425                 return -4;
5426         }
5427
5428         /* Reset and init BRB, PRS, NIG */
5429         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5430         msleep(50);
5431         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5432         msleep(50);
5433         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5434         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5435 #ifndef BCM_ISCSI
5436         /* set NIC mode */
5437         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5438 #endif
5439
5440         /* Enable inputs of parser neighbor blocks */
5441         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5442         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5443         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5444         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5445
5446         DP(NETIF_MSG_HW, "done\n");
5447
5448         return 0; /* OK */
5449 }
5450
5451 static void enable_blocks_attention(struct bnx2x *bp)
5452 {
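        /* writing 0 to a block's INT_MASK unmasks all of its attention
         * bits; the commented-out SEM/MISC masks appear to be left at
         * their reset values deliberately */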
5453         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5454         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5455         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5456         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5457         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5458         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5459         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5460         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5461         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5462 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5463 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5464         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5465         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5466         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5467 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5468 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5469         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5470         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5471         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5472         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5473 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5474 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5475         if (CHIP_REV_IS_FPGA(bp))
5476                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5477         else
5478                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5479         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5480         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5481         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5482 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5483 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5484         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5485         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5486 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5487         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5488 }
5489
5490
5491 static void bnx2x_reset_common(struct bnx2x *bp)
5492 {
5493         /* reset_common */
5494         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5495                0xd3ffff7f);
5496         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5497 }
5498
5499 static int bnx2x_init_common(struct bnx2x *bp)
5500 {
5501         u32 val, i;
5502
5503         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5504
5505         bnx2x_reset_common(bp);
5506         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5507         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5508
5509         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5510         if (CHIP_IS_E1H(bp))
5511                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5512
5513         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5514         msleep(30);
5515         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5516
5517         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5518         if (CHIP_IS_E1(bp)) {
5519                 /* enable HW interrupt from PXP on USDM overflow
5520                    bit 16 on INT_MASK_0 */
5521                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5522         }
5523
5524         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5525         bnx2x_init_pxp(bp);
5526
5527 #ifdef __BIG_ENDIAN
5528         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5529         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5530         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5531         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5532         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5533         /* make sure this value is 0 */
5534         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5535
5536 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5537         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5538         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5539         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5540         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5541 #endif
5542
5543         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5544 #ifdef BCM_ISCSI
5545         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5546         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5547         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5548 #endif
5549
5550         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5551                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5552
5553         /* let the HW do its magic ... */
5554         msleep(100);
5555         /* finish PXP init */
5556         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5557         if (val != 1) {
5558                 BNX2X_ERR("PXP2 CFG failed\n");
5559                 return -EBUSY;
5560         }
5561         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5562         if (val != 1) {
5563                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5564                 return -EBUSY;
5565         }
5566
5567         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5568         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5569
5570         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5571
5572         /* clean the DMAE memory */
5573         bp->dmae_ready = 1;
5574         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5575
5576         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5577         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5578         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5579         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5580
5581         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5582         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5583         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5584         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5585
5586         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5587         /* soft reset pulse */
5588         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5589         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5590
5591 #ifdef BCM_ISCSI
5592         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5593 #endif
5594
5595         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5596         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5597         if (!CHIP_REV_IS_SLOW(bp)) {
5598                 /* enable hw interrupt from doorbell Q */
5599                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5600         }
5601
5602         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5603         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5604         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5605         /* set NIC mode */
5606         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5607         if (CHIP_IS_E1H(bp))
5608                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5609
5610         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5611         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5612         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5613         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5614
5615         if (CHIP_IS_E1H(bp)) {
5616                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5617                                 STORM_INTMEM_SIZE_E1H/2);
5618                 bnx2x_init_fill(bp,
5619                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5620                                 0, STORM_INTMEM_SIZE_E1H/2);
5621                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5622                                 STORM_INTMEM_SIZE_E1H/2);
5623                 bnx2x_init_fill(bp,
5624                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5625                                 0, STORM_INTMEM_SIZE_E1H/2);
5626                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5627                                 STORM_INTMEM_SIZE_E1H/2);
5628                 bnx2x_init_fill(bp,
5629                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5630                                 0, STORM_INTMEM_SIZE_E1H/2);
5631                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5632                                 STORM_INTMEM_SIZE_E1H/2);
5633                 bnx2x_init_fill(bp,
5634                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5635                                 0, STORM_INTMEM_SIZE_E1H/2);
5636         } else { /* E1 */
5637                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5638                                 STORM_INTMEM_SIZE_E1);
5639                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5640                                 STORM_INTMEM_SIZE_E1);
5641                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5642                                 STORM_INTMEM_SIZE_E1);
5643                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5644                                 STORM_INTMEM_SIZE_E1);
5645         }
5646
5647         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5648         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5649         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5650         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5651
5652         /* sync semi rtc */
5653         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5654                0x80000000);
5655         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5656                0x80000000);
5657
5658         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5659         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5660         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5661
5662         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5663         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5664                 REG_WR(bp, i, 0xc0cac01a);
5665                 /* TODO: replace with something meaningful */
5666         }
5667         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5668         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5669
5670         if (sizeof(union cdu_context) != 1024)
5671                 /* we currently assume that a context is 1024 bytes */
5672                 printk(KERN_ALERT PFX "please adjust the size of"
5673                        " cdu_context (%ld)\n", (long)sizeof(union cdu_context));
5674
5675         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5676         val = (4 << 24) + (0 << 12) + 1024;
5677         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5678         if (CHIP_IS_E1(bp)) {
5679                 /* !!! fix PXP client credit until excel update */
5680                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5681                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5682         }
5683
5684         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5685         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5686         /* enable context validation interrupt from CFC */
5687         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5688
5689         /* set the thresholds to prevent CFC/CDU race */
5690         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5691
5692         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5693         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5694
5695         /* PXPCS COMMON comes here */
5696         /* Reset PCIE errors for debug */
5697         REG_WR(bp, 0x2814, 0xffffffff);
5698         REG_WR(bp, 0x3820, 0xffffffff);
5699
5700         /* EMAC0 COMMON comes here */
5701         /* EMAC1 COMMON comes here */
5702         /* DBU COMMON comes here */
5703         /* DBG COMMON comes here */
5704
5705         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5706         if (CHIP_IS_E1H(bp)) {
5707                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5708                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5709         }
5710
5711         if (CHIP_REV_IS_SLOW(bp))
5712                 msleep(200);
5713
5714         /* finish CFC init */
5715         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5716         if (val != 1) {
5717                 BNX2X_ERR("CFC LL_INIT failed\n");
5718                 return -EBUSY;
5719         }
5720         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5721         if (val != 1) {
5722                 BNX2X_ERR("CFC AC_INIT failed\n");
5723                 return -EBUSY;
5724         }
5725         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5726         if (val != 1) {
5727                 BNX2X_ERR("CFC CAM_INIT failed\n");
5728                 return -EBUSY;
5729         }
5730         REG_WR(bp, CFC_REG_DEBUG0, 0);
5731
5732         /* read NIG statistic
5733            to see if this is our first up since powerup */
5734         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5735         val = *bnx2x_sp(bp, wb_data[0]);
5736
5737         /* do internal memory self test */
5738         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5739                 BNX2X_ERR("internal mem self test failed\n");
5740                 return -EBUSY;
5741         }
5742
5743         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5744         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5745         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5746         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5747                 bp->port.need_hw_lock = 1;
5748                 break;
5749
5750         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5751                 /* Fan failure is indicated by SPIO 5 */
5752                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5753                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5754
5755                 /* set to active low mode */
5756                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5757                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5758                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5759                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5760
5761                 /* enable interrupt to signal the IGU */
5762                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5763                 val |= (1 << MISC_REGISTERS_SPIO_5);
5764                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5765                 break;
5766
5767         default:
5768                 break;
5769         }
5770
5771         /* clear PXP2 attentions */
5772         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5773
5774         enable_blocks_attention(bp);
5775
5776         if (!BP_NOMCP(bp)) {
5777                 bnx2x_acquire_phy_lock(bp);
5778                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5779                 bnx2x_release_phy_lock(bp);
5780         } else
5781                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5782
5783         return 0;
5784 }
5785
5786 static int bnx2x_init_port(struct bnx2x *bp)
5787 {
5788         int port = BP_PORT(bp);
5789         u32 low, high;
5790         u32 val;
#ifdef BCM_ISCSI
        /* the BCM_ISCSI-only code below uses func, i and wb_write without
         * declaring them; these definitions are an inferred fix: the start
         * value of i follows the "Port0 1 / Port1 385" ILT comments, and
         * func = BP_FUNC(bp) is an assumption */
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
        u32 wb_write[2];
#endif
5791
5792         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5793
5794         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5795
5796         /* Port PXP comes here */
5797         /* Port PXP2 comes here */
5798 #ifdef BCM_ISCSI
5799         /* Port0  1
5800          * Port1  385 */
5801         i++;
5802         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5803         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5804         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5805         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5806
5807         /* Port0  2
5808          * Port1  386 */
5809         i++;
5810         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5811         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5812         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5813         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5814
5815         /* Port0  3
5816          * Port1  387 */
5817         i++;
5818         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5819         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5820         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5821         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5822 #endif
5823         /* Port CMs come here */
5824         bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5825                              (port ? XCM_PORT1_END : XCM_PORT0_END));
5826
5827         /* Port QM comes here */
5828 #ifdef BCM_ISCSI
5829         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5830         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5831
5832         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5833                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5834 #endif
5835         /* Port DQ comes here */
5836
5837         bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5838                              (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5839         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5840                 /* no pause for emulation and FPGA */
5841                 low = 0;
5842                 high = 513;
5843         } else {
5844                 if (IS_E1HMF(bp))
5845                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5846                 else if (bp->dev->mtu > 4096) {
5847                         if (bp->flags & ONE_PORT_FLAG)
5848                                 low = 160;
5849                         else {
5850                                 val = bp->dev->mtu;
5851                                 /* (24*1024 + val*4)/256 */
5852                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5853                         }
5854                 } else
5855                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5856                 high = low + 56;        /* 14*1024/256 */
5857         }
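        /* a reading of the arithmetic above (inferred, not from a spec):
         * the thresholds are counted in 256-byte BRB blocks, so
         * (24*1024 + mtu*4)/256 rounds up to 96 + mtu/64 blocks, and the
         * high mark adds 14KB (14*1024/256 = 56 blocks) of hysteresis */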
5858         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5859         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5860
5861
5862         /* Port PRS comes here */
5863         /* Port TSDM comes here */
5864         /* Port CSDM comes here */
5865         /* Port USDM comes here */
5866         /* Port XSDM comes here */
5867
5868         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5869                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5870         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5871                              port ? USEM_PORT1_END : USEM_PORT0_END);
5872         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5873                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5874         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5875                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5876
5877         /* Port UPB comes here */
5878         /* Port XPB comes here */
5879
5880         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5881                              port ? PBF_PORT1_END : PBF_PORT0_END);
5882
5883         /* configure PBF to work without PAUSE mtu 9000 */
5884         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5885
5886         /* update threshold */
5887         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5888         /* update init credit */
5889         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5890
5891         /* probe changes */
5892         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5893         msleep(5);
5894         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5895
5896 #ifdef BCM_ISCSI
5897         /* tell the searcher where the T2 table is */
5898         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5899
5900         wb_write[0] = U64_LO(bp->t2_mapping);
5901         wb_write[1] = U64_HI(bp->t2_mapping);
5902         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5903         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5904         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5905         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5906
5907         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5908         /* Port SRCH comes here */
5909 #endif
5910         /* Port CDU comes here */
5911         /* Port CFC comes here */
5912
5913         if (CHIP_IS_E1(bp)) {
5914                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5915                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5916         }
5917         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5918                              port ? HC_PORT1_END : HC_PORT0_END);
5919
5920         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5921                                     MISC_AEU_PORT0_START,
5922                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5923         /* init aeu_mask_attn_func_0/1:
5924          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5925          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5926          *             bits 4-7 are used for "per vn group attention" */
5927         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5928                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5929
5930         /* Port PXPCS comes here */
5931         /* Port EMAC0 comes here */
5932         /* Port EMAC1 comes here */
5933         /* Port DBU comes here */
5934         /* Port DBG comes here */
5935
5936         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5937                              port ? NIG_PORT1_END : NIG_PORT0_END);
5938
5939         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5940
5941         if (CHIP_IS_E1H(bp)) {
5942                 /* 0x2 disable e1hov, 0x1 enable */
5943                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5944                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5945
5946                 /* support pause requests from USDM, TSDM and BRB */
5947                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5948
5949                 {
5950                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5951                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5952                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5953                 }
5954         }
5955
5956         /* Port MCP comes here */
5957         /* Port DMAE comes here */
5958
5959         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5960         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5961                 {
5962                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5963
5964                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5965                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5966
5967                 /* The GPIO should be swapped if the swap register is
5968                    set and active */
5969                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5970                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5971
5972                 /* Select function upon port-swap configuration */
5973                 if (port == 0) {
5974                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5975                         aeu_gpio_mask = (swap_val && swap_override) ?
5976                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5977                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5978                 } else {
5979                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5980                         aeu_gpio_mask = (swap_val && swap_override) ?
5981                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5982                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5983                 }
5984                 val = REG_RD(bp, offset);
5985                 /* add GPIO3 to group */
5986                 val |= aeu_gpio_mask;
5987                 REG_WR(bp, offset, val);
5988                 }
5989                 break;
5990
5991         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5992                 /* add SPIO 5 to group 0 */
5993                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5994                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5995                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5996                 break;
5997
5998         default:
5999                 break;
6000         }
6001
6002         bnx2x__link_reset(bp);
6003
6004         return 0;
6005 }
6006
6007 #define ILT_PER_FUNC            (768/2)
6008 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6009 /* the phys address is shifted right 12 bits and a valid bit
6010    is set in the 53rd bit (bit 52)
6011    then since this is a wide register(TM)
6012    we split it into two 32 bit writes
6013  */
6014 #define ONCHIP_ADDR1(x)         ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
6015 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)(x) >> 44)))
6016 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
6017 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
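/* worked example (illustration only): for a DMA address of 0x1234567000,
 * ONCHIP_ADDR1() yields 0x01234567 (address bits 12..43) and
 * ONCHIP_ADDR2() yields 0x00100000, i.e. the valid bit at position 20 of
 * the high word (bit 52 of the 64-bit entry) with address bits 44..63
 * all zero */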
6018
6019 #define CNIC_ILT_LINES          0
6020
6021 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6022 {
6023         int reg;
6024
6025         if (CHIP_IS_E1H(bp))
6026                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6027         else /* E1 */
6028                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6029
6030         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6031 }
6032
6033 static int bnx2x_init_func(struct bnx2x *bp)
6034 {
6035         int port = BP_PORT(bp);
6036         int func = BP_FUNC(bp);
6037         u32 addr, val;
6038         int i;
6039
6040         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6041
6042         /* set MSI reconfigure capability */
6043         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6044         val = REG_RD(bp, addr);
6045         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6046         REG_WR(bp, addr, val);
6047
6048         i = FUNC_ILT_BASE(func);
6049
6050         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6051         if (CHIP_IS_E1H(bp)) {
6052                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6053                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6054         } else /* E1 */
6055                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6056                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6057
6058
6059         if (CHIP_IS_E1H(bp)) {
6060                 for (i = 0; i < 9; i++)
6061                         bnx2x_init_block(bp,
6062                                          cm_start[func][i], cm_end[func][i]);
6063
6064                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6065                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6066         }
6067
6068         /* HC init per function */
6069         if (CHIP_IS_E1H(bp)) {
6070                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6071
6072                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6073                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6074         }
6075         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6076
6077         /* Reset PCIE errors for debug */
6078         REG_WR(bp, 0x2114, 0xffffffff);
6079         REG_WR(bp, 0x2120, 0xffffffff);
6080
6081         return 0;
6082 }
6083
6084 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6085 {
6086         int i, rc = 0;
6087
6088         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6089            BP_FUNC(bp), load_code);
6090
6091         bp->dmae_ready = 0;
6092         mutex_init(&bp->dmae_mutex);
6093         bnx2x_gunzip_init(bp);
6094
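        /* the cases below fall through on purpose: a COMMON load also runs
         * the PORT and FUNCTION stages, and a PORT load also runs the
         * FUNCTION stage (hence the "no break" comments) */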
6095         switch (load_code) {
6096         case FW_MSG_CODE_DRV_LOAD_COMMON:
6097                 rc = bnx2x_init_common(bp);
6098                 if (rc)
6099                         goto init_hw_err;
6100                 /* no break */
6101
6102         case FW_MSG_CODE_DRV_LOAD_PORT:
6103                 bp->dmae_ready = 1;
6104                 rc = bnx2x_init_port(bp);
6105                 if (rc)
6106                         goto init_hw_err;
6107                 /* no break */
6108
6109         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6110                 bp->dmae_ready = 1;
6111                 rc = bnx2x_init_func(bp);
6112                 if (rc)
6113                         goto init_hw_err;
6114                 break;
6115
6116         default:
6117                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6118                 break;
6119         }
6120
6121         if (!BP_NOMCP(bp)) {
6122                 int func = BP_FUNC(bp);
6123
6124                 bp->fw_drv_pulse_wr_seq =
6125                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6126                                  DRV_PULSE_SEQ_MASK);
6127                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6128                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6129                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6130         } else
6131                 bp->func_stx = 0;
6132
6133         /* this needs to be done before gunzip end */
6134         bnx2x_zero_def_sb(bp);
6135         for_each_queue(bp, i)
6136                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6137
6138 init_hw_err:
6139         bnx2x_gunzip_end(bp);
6140
6141         return rc;
6142 }
6143
6144 /* send the MCP a request, block until there is a reply */
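/* mailbox handshake as implemented below: the command is written to
 * drv_mb_header tagged with an incrementing sequence number, and the
 * firmware acknowledges by echoing that sequence in fw_mb_header next to
 * its response code (FW_MSG_CODE_MASK); a stale or missing echo after
 * the poll budget is treated as a firmware failure */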
6145 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6146 {
6147         int func = BP_FUNC(bp);
6148         u32 seq = ++bp->fw_seq;
6149         u32 rc = 0;
6150         u32 cnt = 1;
6151         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6152
6153         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6154         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6155
6156         do {
6157                 /* let the FW do its magic ... */
6158                 msleep(delay);
6159
6160                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6161
6162                 /* Give the FW up to 2 seconds (200*10ms); longer on slow chips */
6163         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6164
6165         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6166            cnt*delay, rc, seq);
6167
6168         /* is this a reply to our command? */
6169         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6170                 rc &= FW_MSG_CODE_MASK;
6171
6172         } else {
6173                 /* FW BUG! */
6174                 BNX2X_ERR("FW failed to respond!\n");
6175                 bnx2x_fw_dump(bp);
6176                 rc = 0;
6177         }
6178
6179         return rc;
6180 }
6181
6182 static void bnx2x_free_mem(struct bnx2x *bp)
6183 {
6184
6185 #define BNX2X_PCI_FREE(x, y, size) \
6186         do { \
6187                 if (x) { \
6188                         pci_free_consistent(bp->pdev, size, x, y); \
6189                         x = NULL; \
6190                         y = 0; \
6191                 } \
6192         } while (0)
6193
6194 #define BNX2X_FREE(x) \
6195         do { \
6196                 if (x) { \
6197                         vfree(x); \
6198                         x = NULL; \
6199                 } \
6200         } while (0)
6201
6202         int i;
6203
6204         /* fastpath */
6205         /* Common */
6206         for_each_queue(bp, i) {
6207
6208                 /* status blocks */
6209                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6210                                bnx2x_fp(bp, i, status_blk_mapping),
6211                                sizeof(struct host_status_block) +
6212                                sizeof(struct eth_tx_db_data));
6213         }
6214         /* Rx */
6215         for_each_rx_queue(bp, i) {
6216
6217                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6218                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6219                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6220                                bnx2x_fp(bp, i, rx_desc_mapping),
6221                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6222
6223                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6224                                bnx2x_fp(bp, i, rx_comp_mapping),
6225                                sizeof(struct eth_fast_path_rx_cqe) *
6226                                NUM_RCQ_BD);
6227
6228                 /* SGE ring */
6229                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6230                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6231                                bnx2x_fp(bp, i, rx_sge_mapping),
6232                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6233         }
6234         /* Tx */
6235         for_each_tx_queue(bp, i) {
6236
6237                 /* fastpath tx rings: tx_buf tx_desc */
6238                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6239                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6240                                bnx2x_fp(bp, i, tx_desc_mapping),
6241                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6242         }
6243         /* end of fastpath */
6244
6245         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6246                        sizeof(struct host_def_status_block));
6247
6248         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6249                        sizeof(struct bnx2x_slowpath));
6250
6251 #ifdef BCM_ISCSI
6252         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6253         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6254         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6255         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6256 #endif
6257         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6258
6259 #undef BNX2X_PCI_FREE
6260 #undef BNX2X_FREE
6261 }
6262
6263 static int bnx2x_alloc_mem(struct bnx2x *bp)
6264 {
6265
6266 #define BNX2X_PCI_ALLOC(x, y, size) \
6267         do { \
6268                 x = pci_alloc_consistent(bp->pdev, size, y); \
6269                 if (x == NULL) \
6270                         goto alloc_mem_err; \
6271                 memset(x, 0, size); \
6272         } while (0)
6273
6274 #define BNX2X_ALLOC(x, size) \
6275         do { \
6276                 x = vmalloc(size); \
6277                 if (x == NULL) \
6278                         goto alloc_mem_err; \
6279                 memset(x, 0, size); \
6280         } while (0)
6281
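        /* both helpers jump to alloc_mem_err on failure, where a single
         * cleanup path (bnx2x_free_mem) releases whatever was already
         * allocated */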
6282         int i;
6283
6284         /* fastpath */
6285         /* Common */
6286         for_each_queue(bp, i) {
6287                 bnx2x_fp(bp, i, bp) = bp;
6288
6289                 /* status blocks */
6290                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6291                                 &bnx2x_fp(bp, i, status_blk_mapping),
6292                                 sizeof(struct host_status_block) +
6293                                 sizeof(struct eth_tx_db_data));
6294         }
6295         /* Rx */
6296         for_each_rx_queue(bp, i) {
6297
6298                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6299                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6300                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6301                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6302                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6303                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6304
6305                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6306                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6307                                 sizeof(struct eth_fast_path_rx_cqe) *
6308                                 NUM_RCQ_BD);
6309
6310                 /* SGE ring */
6311                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6312                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6313                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6314                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6315                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6316         }
6317         /* Tx */
6318         for_each_tx_queue(bp, i) {
6319
6320                 bnx2x_fp(bp, i, hw_tx_prods) =
6321                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6322
6323                 bnx2x_fp(bp, i, tx_prods_mapping) =
6324                                 bnx2x_fp(bp, i, status_blk_mapping) +
6325                                 sizeof(struct host_status_block);
6326
6327                 /* fastpath tx rings: tx_buf tx_desc */
6328                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6329                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6330                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6331                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6332                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6333         }
6334         /* end of fastpath */
6335
6336         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6337                         sizeof(struct host_def_status_block));
6338
6339         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6340                         sizeof(struct bnx2x_slowpath));
6341
6342 #ifdef BCM_ISCSI
6343         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6344
6345         /* Initialize T1 */
6346         for (i = 0; i < 64*1024; i += 64) {
6347                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6348                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6349         }
6350
6351         /* allocate the searcher T2 table;
6352            we allocate one quarter of the T1 size for T2
6353            (which is not entered into the ILT) */
6354         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6355
6356         /* Initialize T2 */
6357         for (i = 0; i < 16*1024; i += 64)
6358                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6359
6360         /* now fixup the last line in the block to point to the next block */
6361         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6362
6363         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6364         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6365
6366         /* QM queues (128*MAX_CONN) */
6367         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6368 #endif
6369
6370         /* Slow path ring */
6371         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6372
6373         return 0;
6374
6375 alloc_mem_err:
6376         bnx2x_free_mem(bp);
6377         return -ENOMEM;
6378
6379 #undef BNX2X_PCI_ALLOC
6380 #undef BNX2X_ALLOC
6381 }
6382
6383 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6384 {
6385         int i;
6386
6387         for_each_tx_queue(bp, i) {
6388                 struct bnx2x_fastpath *fp = &bp->fp[i];
6389
6390                 u16 bd_cons = fp->tx_bd_cons;
6391                 u16 sw_prod = fp->tx_pkt_prod;
6392                 u16 sw_cons = fp->tx_pkt_cons;
6393
6394                 while (sw_cons != sw_prod) {
6395                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6396                         sw_cons++;
6397                 }
6398         }
6399 }
6400
6401 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6402 {
6403         int i, j;
6404
6405         for_each_rx_queue(bp, j) {
6406                 struct bnx2x_fastpath *fp = &bp->fp[j];
6407
6408                 for (i = 0; i < NUM_RX_BD; i++) {
6409                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6410                         struct sk_buff *skb = rx_buf->skb;
6411
6412                         if (skb == NULL)
6413                                 continue;
6414
6415                         pci_unmap_single(bp->pdev,
6416                                          pci_unmap_addr(rx_buf, mapping),
6417                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6418
6419                         rx_buf->skb = NULL;
6420                         dev_kfree_skb(skb);
6421                 }
6422                 if (!fp->disable_tpa)
6423                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6424                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6425                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6426         }
6427 }
6428
6429 static void bnx2x_free_skbs(struct bnx2x *bp)
6430 {
6431         bnx2x_free_tx_skbs(bp);
6432         bnx2x_free_rx_skbs(bp);
6433 }
6434
6435 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6436 {
6437         int i, offset = 1;
6438
6439         free_irq(bp->msix_table[0].vector, bp->dev);
6440         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6441            bp->msix_table[0].vector);
6442
6443         for_each_queue(bp, i) {
6444                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6445                    "state %x\n", i, bp->msix_table[i + offset].vector,
6446                    bnx2x_fp(bp, i, state));
6447
6448                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6449         }
6450 }
6451
6452 static void bnx2x_free_irq(struct bnx2x *bp)
6453 {
6454         if (bp->flags & USING_MSIX_FLAG) {
6455                 bnx2x_free_msix_irqs(bp);
6456                 pci_disable_msix(bp->pdev);
6457                 bp->flags &= ~USING_MSIX_FLAG;
6458
6459         } else if (bp->flags & USING_MSI_FLAG) {
6460                 free_irq(bp->pdev->irq, bp->dev);
6461                 pci_disable_msi(bp->pdev);
6462                 bp->flags &= ~USING_MSI_FLAG;
6463
6464         } else
6465                 free_irq(bp->pdev->irq, bp->dev);
6466 }
6467
6468 static int bnx2x_enable_msix(struct bnx2x *bp)
6469 {
6470         int i, rc, offset = 1;
6471         int igu_vec = 0;
6472
6473         bp->msix_table[0].entry = igu_vec;
6474         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6475
6476         for_each_queue(bp, i) {
6477                 igu_vec = BP_L_ID(bp) + offset + i;
6478                 bp->msix_table[i + offset].entry = igu_vec;
6479                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6480                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6481         }
6482
6483         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6484                              BNX2X_NUM_QUEUES(bp) + offset);
6485         if (rc) {
6486                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6487                 return rc;
6488         }
6489
6490         bp->flags |= USING_MSIX_FLAG;
6491
6492         return 0;
6493 }
6494
6495 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6496 {
6497         int i, rc, offset = 1;
6498
6499         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6500                          bp->dev->name, bp->dev);
6501         if (rc) {
6502                 BNX2X_ERR("request sp irq failed\n");
6503                 return -EBUSY;
6504         }
6505
6506         for_each_queue(bp, i) {
6507                 struct bnx2x_fastpath *fp = &bp->fp[i];
6508
6509                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6510                 rc = request_irq(bp->msix_table[i + offset].vector,
6511                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6512                 if (rc) {
6513                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6514                         bnx2x_free_msix_irqs(bp);
6515                         return -EBUSY;
6516                 }
6517
6518                 fp->state = BNX2X_FP_STATE_IRQ;
6519         }
6520
6521         i = BNX2X_NUM_QUEUES(bp);
6522         if (is_multi(bp))
6523                 printk(KERN_INFO PFX
6524                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6525                        bp->dev->name, bp->msix_table[0].vector,
6526                        bp->msix_table[offset].vector,
6527                        bp->msix_table[offset + i - 1].vector);
6528         else
6529                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6530                        bp->dev->name, bp->msix_table[0].vector,
6531                        bp->msix_table[offset + i - 1].vector);
6532
6533         return 0;
6534 }
6535
6536 static int bnx2x_enable_msi(struct bnx2x *bp)
6537 {
6538         int rc;
6539
6540         rc = pci_enable_msi(bp->pdev);
6541         if (rc) {
6542                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6543                 return -1;
6544         }
6545         bp->flags |= USING_MSI_FLAG;
6546
6547         return 0;
6548 }
6549
6550 static int bnx2x_req_irq(struct bnx2x *bp)
6551 {
6552         unsigned long flags;
6553         int rc;
6554
6555         if (bp->flags & USING_MSI_FLAG)
6556                 flags = 0;
6557         else
6558                 flags = IRQF_SHARED;
6559
6560         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6561                          bp->dev->name, bp->dev);
6562         if (!rc)
6563                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6564
6565         return rc;
6566 }
6567
6568 static void bnx2x_napi_enable(struct bnx2x *bp)
6569 {
6570         int i;
6571
6572         for_each_rx_queue(bp, i)
6573                 napi_enable(&bnx2x_fp(bp, i, napi));
6574 }
6575
6576 static void bnx2x_napi_disable(struct bnx2x *bp)
6577 {
6578         int i;
6579
6580         for_each_rx_queue(bp, i)
6581                 napi_disable(&bnx2x_fp(bp, i, napi));
6582 }
6583
6584 static void bnx2x_netif_start(struct bnx2x *bp)
6585 {
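        /* bp->intr_sem counts outstanding interrupt-disable requests; only
         * the caller that drops it to zero may re-enable NAPI and HW
         * interrupts */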
6586         if (atomic_dec_and_test(&bp->intr_sem)) {
6587                 if (netif_running(bp->dev)) {
6588                         bnx2x_napi_enable(bp);
6589                         bnx2x_int_enable(bp);
6590                         if (bp->state == BNX2X_STATE_OPEN)
6591                                 netif_tx_wake_all_queues(bp->dev);
6592                 }
6593         }
6594 }
6595
6596 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6597 {
6598         bnx2x_int_disable_sync(bp, disable_hw);
6599         bnx2x_napi_disable(bp);
6600         if (netif_running(bp->dev)) {
6601                 netif_tx_disable(bp->dev);
6602                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6603         }
6604 }
6605
6606 /*
6607  * Init service functions
6608  */
6609
6610 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6611 {
6612         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6613         int port = BP_PORT(bp);
6614
6615         /* CAM allocation
6616          * unicasts 0-31:port0 32-63:port1
6617          * multicast 64-127:port0 128-191:port1
6618          */
6619         config->hdr.length = 2;
6620         config->hdr.offset = port ? 32 : 0;
6621         config->hdr.client_id = bp->fp->cl_id;
6622         config->hdr.reserved1 = 0;
6623
6624         /* primary MAC */
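        /* each CAM entry stores the MAC as three 16-bit words; swab16()
         * presumably converts the host-order halfwords of dev_addr into
         * the byte order the CAM expects */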
6625         config->config_table[0].cam_entry.msb_mac_addr =
6626                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6627         config->config_table[0].cam_entry.middle_mac_addr =
6628                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6629         config->config_table[0].cam_entry.lsb_mac_addr =
6630                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6631         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6632         if (set)
6633                 config->config_table[0].target_table_entry.flags = 0;
6634         else
6635                 CAM_INVALIDATE(config->config_table[0]);
6636         config->config_table[0].target_table_entry.client_id = 0;
6637         config->config_table[0].target_table_entry.vlan_id = 0;
6638
6639         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6640            (set ? "setting" : "clearing"),
6641            config->config_table[0].cam_entry.msb_mac_addr,
6642            config->config_table[0].cam_entry.middle_mac_addr,
6643            config->config_table[0].cam_entry.lsb_mac_addr);
6644
6645         /* broadcast */
6646         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6647         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6648         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6649         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6650         if (set)
6651                 config->config_table[1].target_table_entry.flags =
6652                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6653         else
6654                 CAM_INVALIDATE(config->config_table[1]);
6655         config->config_table[1].target_table_entry.client_id = 0;
6656         config->config_table[1].target_table_entry.vlan_id = 0;
6657
6658         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6659                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6660                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6661 }
6662
6663 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6664 {
6665         struct mac_configuration_cmd_e1h *config =
6666                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6667
6668         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6669                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6670                 return;
6671         }
6672
6673         /* CAM allocation for E1H
6674          * unicasts: by func number
6675          * multicast: 20+FUNC*20, 20 each
6676          */
6677         config->hdr.length = 1;
6678         config->hdr.offset = BP_FUNC(bp);
6679         config->hdr.client_id = bp->fp->cl_id;
6680         config->hdr.reserved1 = 0;
6681
6682         /* primary MAC */
6683         config->config_table[0].msb_mac_addr =
6684                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6685         config->config_table[0].middle_mac_addr =
6686                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6687         config->config_table[0].lsb_mac_addr =
6688                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6689         config->config_table[0].client_id = BP_L_ID(bp);
6690         config->config_table[0].vlan_id = 0;
6691         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6692         if (set)
6693                 config->config_table[0].flags = BP_PORT(bp);
6694         else
6695                 config->config_table[0].flags =
6696                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6697
6698         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6699            (set ? "setting" : "clearing"),
6700            config->config_table[0].msb_mac_addr,
6701            config->config_table[0].middle_mac_addr,
6702            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6703
6704         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6705                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6706                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6707 }
6708
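/* Sleep/poll for up to ~5 seconds (5000 iterations x 1 ms) until
 * *state_p reaches the requested state. In polling mode the RX rings
 * are serviced by hand so the ramrod completion can be seen without
 * interrupts.
 */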
6709 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6710                              int *state_p, int poll)
6711 {
6712         /* can take a while if any port is running */
6713         int cnt = 5000;
6714
6715         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6716            poll ? "polling" : "waiting", state, idx);
6717
6718         might_sleep();
6719         while (cnt--) {
6720                 if (poll) {
6721                         bnx2x_rx_int(bp->fp, 10);
6722                         /* if the index is non-zero, the reply
6723                          * for some commands arrives on a
6724                          * non-default queue
6725                          */
6726                         if (idx)
6727                                 bnx2x_rx_int(&bp->fp[idx], 10);
6728                 }
6729
6730                 mb(); /* state is changed by bnx2x_sp_event() */
6731                 if (*state_p == state) {
6732 #ifdef BNX2X_STOP_ON_ERROR
6733                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6734 #endif
6735                         return 0;
6736                 }
6737
6738                 msleep(1);
6739         }
6740
6741         /* timeout! */
6742         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6743                   poll ? "polling" : "waiting", state, idx);
6744 #ifdef BNX2X_STOP_ON_ERROR
6745         bnx2x_panic();
6746 #endif
6747
6748         return -EBUSY;
6749 }
6750
6751 static int bnx2x_setup_leading(struct bnx2x *bp)
6752 {
6753         int rc;
6754
6755         /* reset IGU state */
6756         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6757
6758         /* SETUP ramrod */
6759         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6760
6761         /* Wait for completion */
6762         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6763
6764         return rc;
6765 }
6766
6767 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6768 {
6769         struct bnx2x_fastpath *fp = &bp->fp[index];
6770
6771         /* reset IGU state */
6772         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6773
6774         /* SETUP ramrod */
6775         fp->state = BNX2X_FP_STATE_OPENING;
6776         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6777                       fp->cl_id, 0);
6778
6779         /* Wait for completion */
6780         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6781                                  &(fp->state), 0);
6782 }
6783
6784 static int bnx2x_poll(struct napi_struct *napi, int budget);
6785
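/* Derive the RX/TX queue counts from the int_mode module parameter:
 * INTx and MSI force a single queue, while MSI-X uses one queue per
 * online CPU (capped at BNX2X_MAX_QUEUES) when RSS is enabled and
 * drops back to a single queue if MSI-X allocation fails.
 */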
6786 static void bnx2x_set_int_mode(struct bnx2x *bp)
6787 {
6788         int num_queues;
6789
6790         switch (int_mode) {
6791         case INT_MODE_INTx:
6792         case INT_MODE_MSI:
6793                 num_queues = 1;
6794                 bp->num_rx_queues = num_queues;
6795                 bp->num_tx_queues = num_queues;
6796                 DP(NETIF_MSG_IFUP,
6797                    "set number of queues to %d\n", num_queues);
6798                 break;
6799
6800         case INT_MODE_MSIX:
6801         default:
6802                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6803                         num_queues = min_t(u32, num_online_cpus(),
6804                                            BNX2X_MAX_QUEUES(bp));
6805                 else
6806                         num_queues = 1;
6807                 bp->num_rx_queues = num_queues;
6808                 bp->num_tx_queues = num_queues;
6809                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6810                    "  number of tx queues to %d\n",
6811                    bp->num_rx_queues, bp->num_tx_queues);
6812                 /* if we can't use MSI-X we only need one fp,
6813                  * so try to enable MSI-X with the requested number of fp's
6814                  * and fall back to MSI or legacy INTx with one fp
6815                  */
6816                 if (bnx2x_enable_msix(bp)) {
6817                         /* failed to enable MSI-X */
6818                         num_queues = 1;
6819                         bp->num_rx_queues = num_queues;
6820                         bp->num_tx_queues = num_queues;
6821                         if (bp->multi_mode)
6822                                 BNX2X_ERR("Multi requested but failed to "
6823                                           "enable MSI-X  set number of "
6824                                           "queues to %d\n", num_queues);
6825                 }
6826                 break;
6827         }
6828         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6829 }
6830
6831 static void bnx2x_set_rx_mode(struct net_device *dev);
6832
6833 /* must be called with rtnl_lock */
6834 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6835 {
6836         u32 load_code;
6837         int i, rc = 0;
6838 #ifdef BNX2X_STOP_ON_ERROR
6839         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6840         if (unlikely(bp->panic))
6841                 return -EPERM;
6842 #endif
6843
6844         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6845
6846         bnx2x_set_int_mode(bp);
6847
6848         if (bnx2x_alloc_mem(bp))
6849                 return -ENOMEM;
6850
6851         for_each_rx_queue(bp, i)
6852                 bnx2x_fp(bp, i, disable_tpa) =
6853                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6854
6855         for_each_rx_queue(bp, i)
6856                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6857                                bnx2x_poll, 128);
6858
6859 #ifdef BNX2X_STOP_ON_ERROR
6860         for_each_rx_queue(bp, i) {
6861                 struct bnx2x_fastpath *fp = &bp->fp[i];
6862
6863                 fp->poll_no_work = 0;
6864                 fp->poll_calls = 0;
6865                 fp->poll_max_calls = 0;
6866                 fp->poll_complete = 0;
6867                 fp->poll_exit = 0;
6868         }
6869 #endif
6870         bnx2x_napi_enable(bp);
6871
6872         if (bp->flags & USING_MSIX_FLAG) {
6873                 rc = bnx2x_req_msix_irqs(bp);
6874                 if (rc) {
6875                         pci_disable_msix(bp->pdev);
6876                         goto load_error1;
6877                 }
6878         } else {
6879                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6880                         bnx2x_enable_msi(bp);
6881                 bnx2x_ack_int(bp);
6882                 rc = bnx2x_req_irq(bp);
6883                 if (rc) {
6884                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6885                         if (bp->flags & USING_MSI_FLAG)
6886                                 pci_disable_msi(bp->pdev);
6887                         goto load_error1;
6888                 }
6889                 if (bp->flags & USING_MSI_FLAG) {
6890                         bp->dev->irq = bp->pdev->irq;
6891                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6892                                bp->dev->name, bp->pdev->irq);
6893                 }
6894         }
6895
6896         /* Send LOAD_REQUEST command to the MCP.
6897            The response indicates the type of LOAD to perform:
6898            if this is the first port to be initialized,
6899            the common blocks must be initialized as well; otherwise not.
6900         */
6901         if (!BP_NOMCP(bp)) {
6902                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6903                 if (!load_code) {
6904                         BNX2X_ERR("MCP response failure, aborting\n");
6905                         rc = -EBUSY;
6906                         goto load_error2;
6907                 }
6908                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6909                         rc = -EBUSY; /* other port in diagnostic mode */
6910                         goto load_error2;
6911                 }
6912
6913         } else {
6914                 int port = BP_PORT(bp);
6915
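                /* No MCP: the driver tracks the load state itself.
                 * load_count[0] counts all loaded functions and
                 * load_count[1 + port] those on this port, so the first
                 * load overall does COMMON init, the first on a port
                 * does PORT init and later loads init the function only.
                 */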
6916                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
6917                    load_count[0], load_count[1], load_count[2]);
6918                 load_count[0]++;
6919                 load_count[1 + port]++;
6920                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
6921                    load_count[0], load_count[1], load_count[2]);
6922                 if (load_count[0] == 1)
6923                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6924                 else if (load_count[1 + port] == 1)
6925                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6926                 else
6927                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6928         }
6929
6930         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6931             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6932                 bp->port.pmf = 1;
6933         else
6934                 bp->port.pmf = 0;
6935         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6936
6937         /* Initialize HW */
6938         rc = bnx2x_init_hw(bp, load_code);
6939         if (rc) {
6940                 BNX2X_ERR("HW init failed, aborting\n");
6941                 goto load_error2;
6942         }
6943
6944         /* Setup NIC internals and enable interrupts */
6945         bnx2x_nic_init(bp, load_code);
6946
6947         /* Send LOAD_DONE command to MCP */
6948         if (!BP_NOMCP(bp)) {
6949                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6950                 if (!load_code) {
6951                         BNX2X_ERR("MCP response failure, aborting\n");
6952                         rc = -EBUSY;
6953                         goto load_error3;
6954                 }
6955         }
6956
6957         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6958
6959         rc = bnx2x_setup_leading(bp);
6960         if (rc) {
6961                 BNX2X_ERR("Setup leading failed!\n");
6962                 goto load_error3;
6963         }
6964
6965         if (CHIP_IS_E1H(bp))
6966                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6967                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6968                         bp->state = BNX2X_STATE_DISABLED;
6969                 }
6970
6971         if (bp->state == BNX2X_STATE_OPEN)
6972                 for_each_nondefault_queue(bp, i) {
6973                         rc = bnx2x_setup_multi(bp, i);
6974                         if (rc)
6975                                 goto load_error3;
6976                 }
6977
6978         if (CHIP_IS_E1(bp))
6979                 bnx2x_set_mac_addr_e1(bp, 1);
6980         else
6981                 bnx2x_set_mac_addr_e1h(bp, 1);
6982
6983         if (bp->port.pmf)
6984                 bnx2x_initial_phy_init(bp, load_mode);
6985
6986         /* Start fast path */
6987         switch (load_mode) {
6988         case LOAD_NORMAL:
6989                 /* Tx queues should only be re-enabled */
6990                 netif_tx_wake_all_queues(bp->dev);
6991                 /* Initialize the receive filter. */
6992                 bnx2x_set_rx_mode(bp->dev);
6993                 break;
6994
6995         case LOAD_OPEN:
6996                 netif_tx_start_all_queues(bp->dev);
6997                 /* Initialize the receive filter. */
6998                 bnx2x_set_rx_mode(bp->dev);
6999                 break;
7000
7001         case LOAD_DIAG:
7002                 /* Initialize the receive filter. */
7003                 bnx2x_set_rx_mode(bp->dev);
7004                 bp->state = BNX2X_STATE_DIAG;
7005                 break;
7006
7007         default:
7008                 break;
7009         }
7010
7011         if (!bp->port.pmf)
7012                 bnx2x__link_status_update(bp);
7013
7014         /* start the timer */
7015         mod_timer(&bp->timer, jiffies + bp->current_interval);
7016
7017
7018         return 0;
7019
7020 load_error3:
7021         bnx2x_int_disable_sync(bp, 1);
7022         if (!BP_NOMCP(bp)) {
7023                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7024                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7025         }
7026         bp->port.pmf = 0;
7027         /* Free SKBs, SGEs, TPA pool and driver internals */
7028         bnx2x_free_skbs(bp);
7029         for_each_rx_queue(bp, i)
7030                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7031 load_error2:
7032         /* Release IRQs */
7033         bnx2x_free_irq(bp);
7034 load_error1:
7035         bnx2x_napi_disable(bp);
7036         for_each_rx_queue(bp, i)
7037                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7038         bnx2x_free_mem(bp);
7039
7040         return rc;
7041 }
7042
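/* Tear down one non-default client: send a HALT ramrod, then delete
 * the CFC entry, polling for each completion (the IRQs have already
 * been released on the unload path).
 */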
7043 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7044 {
7045         struct bnx2x_fastpath *fp = &bp->fp[index];
7046         int rc;
7047
7048         /* halt the connection */
7049         fp->state = BNX2X_FP_STATE_HALTING;
7050         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7051
7052         /* Wait for completion */
7053         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7054                                &(fp->state), 1);
7055         if (rc) /* timeout */
7056                 return rc;
7057
7058         /* delete cfc entry */
7059         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7060
7061         /* Wait for completion */
7062         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7063                                &(fp->state), 1);
7064         return rc;
7065 }
7066
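/* Halt client 0 and delete the port. The PORT_DEL completion is
 * detected as a producer update on the default status block rather
 * than as a fastpath state change.
 */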
7067 static int bnx2x_stop_leading(struct bnx2x *bp)
7068 {
7069         __le16 dsb_sp_prod_idx;
7070         /* if the other port is handling traffic,
7071            this can take a lot of time */
7072         int cnt = 500;
7073         int rc;
7074
7075         might_sleep();
7076
7077         /* Send HALT ramrod */
7078         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7079         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7080
7081         /* Wait for completion */
7082         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7083                                &(bp->fp[0].state), 1);
7084         if (rc) /* timeout */
7085                 return rc;
7086
7087         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7088
7089         /* Send PORT_DELETE ramrod */
7090         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7091
7092         /* Wait for the completion to arrive on the default status block.
7093            We are going to reset the chip anyway,
7094            so there is not much to do if this times out.
7095          */
7096         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7097                 if (!cnt) {
7098                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7099                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7100                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7101 #ifdef BNX2X_STOP_ON_ERROR
7102                         bnx2x_panic();
7103 #endif
7104                         rc = -EBUSY;
7105                         break;
7106                 }
7107                 cnt--;
7108                 msleep(1);
7109                 rmb(); /* Refresh the dsb_sp_prod */
7110         }
7111         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7112         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7113
7114         return rc;
7115 }
7116
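/* Mask the function's HC leading/trailing edge interrupts and clear
 * its range of ILT entries.
 */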
7117 static void bnx2x_reset_func(struct bnx2x *bp)
7118 {
7119         int port = BP_PORT(bp);
7120         int func = BP_FUNC(bp);
7121         int base, i;
7122
7123         /* Configure IGU */
7124         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7125         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7126
7127         /* Clear ILT */
7128         base = FUNC_ILT_BASE(func);
7129         for (i = base; i < base + ILT_PER_FUNC; i++)
7130                 bnx2x_ilt_wr(bp, i, 0);
7131 }
7132
7133 static void bnx2x_reset_port(struct bnx2x *bp)
7134 {
7135         int port = BP_PORT(bp);
7136         u32 val;
7137
7138         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7139
7140         /* Do not rcv packets to BRB */
7141         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7142         /* Do not direct rcv packets that are not for MCP to the BRB */
7143         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7144                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7145
7146         /* Configure AEU */
7147         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7148
7149         msleep(100);
7150         /* Check for BRB port occupancy */
7151         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7152         if (val)
7153                 DP(NETIF_MSG_IFDOWN,
7154                    "BRB1 is not empty  %d blocks are occupied\n", val);
7155
7156         /* TODO: Close Doorbell port? */
7157 }
7158
7159 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7160 {
7161         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7162            BP_FUNC(bp), reset_code);
7163
7164         switch (reset_code) {
7165         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7166                 bnx2x_reset_port(bp);
7167                 bnx2x_reset_func(bp);
7168                 bnx2x_reset_common(bp);
7169                 break;
7170
7171         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7172                 bnx2x_reset_port(bp);
7173                 bnx2x_reset_func(bp);
7174                 break;
7175
7176         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7177                 bnx2x_reset_func(bp);
7178                 break;
7179
7180         default:
7181                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7182                 break;
7183         }
7184 }
7185
7186 /* must be called with rtnl_lock */
7187 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7188 {
7189         int port = BP_PORT(bp);
7190         u32 reset_code = 0;
7191         int i, cnt, rc;
7192
7193         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7194
7195         bp->rx_mode = BNX2X_RX_MODE_NONE;
7196         bnx2x_set_storm_rx_mode(bp);
7197
7198         bnx2x_netif_stop(bp, 1);
7199
7200         del_timer_sync(&bp->timer);
7201         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7202                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7203         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7204
7205         /* Release IRQs */
7206         bnx2x_free_irq(bp);
7207
7208         /* Wait until tx fastpath tasks complete */
7209         for_each_tx_queue(bp, i) {
7210                 struct bnx2x_fastpath *fp = &bp->fp[i];
7211
7212                 cnt = 1000;
7213                 while (bnx2x_has_tx_work_unload(fp)) {
7214
7215                         bnx2x_tx_int(fp, 1000);
7216                         if (!cnt) {
7217                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7218                                           i);
7219 #ifdef BNX2X_STOP_ON_ERROR
7220                                 bnx2x_panic();
7221                                 return -EBUSY;
7222 #else
7223                                 break;
7224 #endif
7225                         }
7226                         cnt--;
7227                         msleep(1);
7228                 }
7229         }
7230         /* Give HW time to discard old tx messages */
7231         msleep(1);
7232
7233         if (CHIP_IS_E1(bp)) {
7234                 struct mac_configuration_cmd *config =
7235                                                 bnx2x_sp(bp, mcast_config);
7236
7237                 bnx2x_set_mac_addr_e1(bp, 0);
7238
7239                 for (i = 0; i < config->hdr.length; i++)
7240                         CAM_INVALIDATE(config->config_table[i]);
7241
7242                 config->hdr.length = i;
7243                 if (CHIP_REV_IS_SLOW(bp))
7244                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7245                 else
7246                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7247                 config->hdr.client_id = bp->fp->cl_id;
7248                 config->hdr.reserved1 = 0;
7249
7250                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7251                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7252                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7253
7254         } else { /* E1H */
7255                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7256
7257                 bnx2x_set_mac_addr_e1h(bp, 0);
7258
7259                 for (i = 0; i < MC_HASH_SIZE; i++)
7260                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7261         }
7262
7263         if (unload_mode == UNLOAD_NORMAL)
7264                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7265
7266         else if (bp->flags & NO_WOL_FLAG) {
7267                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7268                 if (CHIP_IS_E1H(bp))
7269                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7270
7271         } else if (bp->wol) {
7272                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7273                 u8 *mac_addr = bp->dev->dev_addr;
7274                 u32 val;
7275                 /* The MAC address is written to entries 1-4 to
7276                    preserve entry 0, which is used by the PMF */
7277                 u8 entry = (BP_E1HVN(bp) + 1)*8;
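                /* each MAC_MATCH entry is 8 bytes (two 32-bit registers,
                   written below at +entry and +entry + 4), so VN 0..3
                   map to entries 1..4 at byte offsets 8..32 */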
7278
7279                 val = (mac_addr[0] << 8) | mac_addr[1];
7280                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7281
7282                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7283                       (mac_addr[4] << 8) | mac_addr[5];
7284                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7285
7286                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7287
7288         } else
7289                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7290
7291         /* Close the multi and leading connections.
7292            Ramrod completions are collected synchronously */
7293         for_each_nondefault_queue(bp, i)
7294                 if (bnx2x_stop_multi(bp, i))
7295                         goto unload_error;
7296
7297         rc = bnx2x_stop_leading(bp);
7298         if (rc) {
7299                 BNX2X_ERR("Stop leading failed!\n");
7300 #ifdef BNX2X_STOP_ON_ERROR
7301                 return -EBUSY;
7302 #else
7303                 goto unload_error;
7304 #endif
7305         }
7306
7307 unload_error:
7308         if (!BP_NOMCP(bp))
7309                 reset_code = bnx2x_fw_command(bp, reset_code);
7310         else {
7311                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7312                    load_count[0], load_count[1], load_count[2]);
7313                 load_count[0]--;
7314                 load_count[1 + port]--;
7315                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7316                    load_count[0], load_count[1], load_count[2]);
7317                 if (load_count[0] == 0)
7318                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7319                 else if (load_count[1 + port] == 0)
7320                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7321                 else
7322                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7323         }
7324
7325         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7326             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7327                 bnx2x__link_reset(bp);
7328
7329         /* Reset the chip */
7330         bnx2x_reset_chip(bp, reset_code);
7331
7332         /* Report UNLOAD_DONE to MCP */
7333         if (!BP_NOMCP(bp))
7334                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7335
7336         bp->port.pmf = 0;
7337
7338         /* Free SKBs, SGEs, TPA pool and driver internals */
7339         bnx2x_free_skbs(bp);
7340         for_each_rx_queue(bp, i)
7341                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7342         for_each_rx_queue(bp, i)
7343                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7344         bnx2x_free_mem(bp);
7345
7346         bp->state = BNX2X_STATE_CLOSED;
7347
7348         netif_carrier_off(bp->dev);
7349
7350         return 0;
7351 }
7352
7353 static void bnx2x_reset_task(struct work_struct *work)
7354 {
7355         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7356
7357 #ifdef BNX2X_STOP_ON_ERROR
7358         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7359                   " so reset not done to allow debug dump,\n"
7360          KERN_ERR " you will need to reboot when done\n");
7361         return;
7362 #endif
7363
7364         rtnl_lock();
7365
7366         if (!netif_running(bp->dev))
7367                 goto reset_task_exit;
7368
7369         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7370         bnx2x_nic_load(bp, LOAD_NORMAL);
7371
7372 reset_task_exit:
7373         rtnl_unlock();
7374 }
7375
7376 /* end of nic load/unload */
7377
7378 /* ethtool_ops */
7379
7380 /*
7381  * Init service functions
7382  */
7383
7384 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7385 {
7386         switch (func) {
7387         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7388         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7389         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7390         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7391         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7392         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7393         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7394         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7395         default:
7396                 BNX2X_ERR("Unsupported function index: %d\n", func);
7397                 return (u32)(-1);
7398         }
7399 }
7400
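/* Disable HC interrupts while "pretending" to be function 0: the PXP2
 * pretend register makes our GRC accesses appear to come from function
 * 0 for the duration and is restored (and verified) afterwards.
 */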
7401 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7402 {
7403         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7404
7405         /* Flush all outstanding writes */
7406         mmiowb();
7407
7408         /* Pretend to be function 0 */
7409         REG_WR(bp, reg, 0);
7410         /* Flush the GRC transaction (in the chip) */
7411         new_val = REG_RD(bp, reg);
7412         if (new_val != 0) {
7413                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7414                           new_val);
7415                 BUG();
7416         }
7417
7418         /* From now on we are in the "like-E1" mode */
7419         bnx2x_int_disable(bp);
7420
7421         /* Flush all outstanding writes */
7422         mmiowb();
7423
7424         /* Restore the original function settings */
7425         REG_WR(bp, reg, orig_func);
7426         new_val = REG_RD(bp, reg);
7427         if (new_val != orig_func) {
7428                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7429                           orig_func, new_val);
7430                 BUG();
7431         }
7432 }
7433
7434 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7435 {
7436         if (CHIP_IS_E1H(bp))
7437                 bnx2x_undi_int_disable_e1h(bp, func);
7438         else
7439                 bnx2x_int_disable(bp);
7440 }
7441
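/* Detect a pre-boot (UNDI) driver left active on the device and, if
 * one is found, perform an orderly unload and chip reset so that probe
 * starts from a clean state. MISC_REG_UNPREPARED together with the
 * doorbell CID offset (0x7) identifies the UNDI driver.
 */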
7442 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7443 {
7444         u32 val;
7445
7446         /* Check if there is any driver already loaded */
7447         val = REG_RD(bp, MISC_REG_UNPREPARED);
7448         if (val == 0x1) {
7449                 /* Check if it is the UNDI driver: UNDI initializes
7450                  * the CID offset for the normal doorbell to 0x7
7451                  */
7452                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7453                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7454                 if (val == 0x7) {
7455                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7456                         /* save our func */
7457                         int func = BP_FUNC(bp);
7458                         u32 swap_en;
7459                         u32 swap_val;
7460
7461                         /* clear the UNDI indication */
7462                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7463
7464                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7465
7466                         /* try to unload UNDI on port 0 */
7467                         bp->func = 0;
7468                         bp->fw_seq =
7469                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7470                                 DRV_MSG_SEQ_NUMBER_MASK);
7471                         reset_code = bnx2x_fw_command(bp, reset_code);
7472
7473                         /* if UNDI is loaded on the other port */
7474                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7475
7476                                 /* send "DONE" for previous unload */
7477                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7478
7479                                 /* unload UNDI on port 1 */
7480                                 bp->func = 1;
7481                                 bp->fw_seq =
7482                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7483                                         DRV_MSG_SEQ_NUMBER_MASK);
7484                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7485
7486                                 bnx2x_fw_command(bp, reset_code);
7487                         }
7488
7489                         /* now it's safe to release the lock */
7490                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7491
7492                         bnx2x_undi_int_disable(bp, func);
7493
7494                         /* close input traffic and wait for it */
7495                         /* Do not rcv packets to BRB */
7496                         REG_WR(bp,
7497                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7498                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7499                         /* Do not direct rcv packets that are not for MCP to
7500                          * the BRB */
7501                         REG_WR(bp,
7502                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7503                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7504                         /* clear AEU */
7505                         REG_WR(bp,
7506                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7507                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7508                         msleep(10);
7509
7510                         /* save NIG port swap info */
7511                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7512                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7513                         /* reset device */
7514                         REG_WR(bp,
7515                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7516                                0xd3ffffff);
7517                         REG_WR(bp,
7518                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7519                                0x1403);
7520                         /* take the NIG out of reset and restore swap values */
7521                         REG_WR(bp,
7522                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7523                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7524                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7525                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7526
7527                         /* send unload done to the MCP */
7528                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7529
7530                         /* restore our func and fw_seq */
7531                         bp->func = func;
7532                         bp->fw_seq =
7533                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7534                                 DRV_MSG_SEQ_NUMBER_MASK);
7535
7536                 } else
7537                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7538         }
7539 }
7540
7541 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7542 {
7543         u32 val, val2, val3, val4, id;
7544         u16 pmc;
7545
7546         /* Get the chip revision id and number. */
7547         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7548         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7549         id = ((val & 0xffff) << 16);
7550         val = REG_RD(bp, MISC_REG_CHIP_REV);
7551         id |= ((val & 0xf) << 12);
7552         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7553         id |= ((val & 0xff) << 4);
7554         val = REG_RD(bp, MISC_REG_BOND_ID);
7555         id |= (val & 0xf);
7556         bp->common.chip_id = id;
7557         bp->link_params.chip_id = bp->common.chip_id;
7558         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7559
7560         val = (REG_RD(bp, 0x2874) & 0x55);
7561         if ((bp->common.chip_id & 0x1) ||
7562             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7563                 bp->flags |= ONE_PORT_FLAG;
7564                 BNX2X_DEV_INFO("single port device\n");
7565         }
7566
7567         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7568         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7569                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7570         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7571                        bp->common.flash_size, bp->common.flash_size);
7572
7573         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7574         bp->link_params.shmem_base = bp->common.shmem_base;
7575         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7576
7577         if (!bp->common.shmem_base ||
7578             (bp->common.shmem_base < 0xA0000) ||
7579             (bp->common.shmem_base >= 0xC0000)) {
7580                 BNX2X_DEV_INFO("MCP not active\n");
7581                 bp->flags |= NO_MCP_FLAG;
7582                 return;
7583         }
7584
7585         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7586         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7587                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7588                 BNX2X_ERR("BAD MCP validity signature\n");
7589
7590         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7591         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7592
7593         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7594                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7595                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7596
7597         bp->link_params.feature_config_flags = 0;
7598         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7599         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7600                 bp->link_params.feature_config_flags |=
7601                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7602         else
7603                 bp->link_params.feature_config_flags &=
7604                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7605
7606         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7607         bp->common.bc_ver = val;
7608         BNX2X_DEV_INFO("bc_ver %X\n", val);
7609         if (val < BNX2X_BC_VER) {
7610                 /* for now only warn;
7611                  * later we might need to enforce this */
7612                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7613                           " please upgrade BC\n", BNX2X_BC_VER, val);
7614         }
7615
7616         if (BP_E1HVN(bp) == 0) {
7617                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7618                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7619         } else {
7620                 /* no WOL capability for E1HVN != 0 */
7621                 bp->flags |= NO_WOL_FLAG;
7622         }
7623         BNX2X_DEV_INFO("%sWoL capable\n",
7624                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7625
7626         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7627         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7628         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7629         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7630
7631         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7632                val, val2, val3, val4);
7633 }
7634
7635 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7636                                                     u32 switch_cfg)
7637 {
7638         int port = BP_PORT(bp);
7639         u32 ext_phy_type;
7640
7641         switch (switch_cfg) {
7642         case SWITCH_CFG_1G:
7643                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7644
7645                 ext_phy_type =
7646                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7647                 switch (ext_phy_type) {
7648                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7649                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7650                                        ext_phy_type);
7651
7652                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7653                                                SUPPORTED_10baseT_Full |
7654                                                SUPPORTED_100baseT_Half |
7655                                                SUPPORTED_100baseT_Full |
7656                                                SUPPORTED_1000baseT_Full |
7657                                                SUPPORTED_2500baseX_Full |
7658                                                SUPPORTED_TP |
7659                                                SUPPORTED_FIBRE |
7660                                                SUPPORTED_Autoneg |
7661                                                SUPPORTED_Pause |
7662                                                SUPPORTED_Asym_Pause);
7663                         break;
7664
7665                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7666                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7667                                        ext_phy_type);
7668
7669                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7670                                                SUPPORTED_10baseT_Full |
7671                                                SUPPORTED_100baseT_Half |
7672                                                SUPPORTED_100baseT_Full |
7673                                                SUPPORTED_1000baseT_Full |
7674                                                SUPPORTED_TP |
7675                                                SUPPORTED_FIBRE |
7676                                                SUPPORTED_Autoneg |
7677                                                SUPPORTED_Pause |
7678                                                SUPPORTED_Asym_Pause);
7679                         break;
7680
7681                 default:
7682                         BNX2X_ERR("NVRAM config error. "
7683                                   "BAD SerDes ext_phy_config 0x%x\n",
7684                                   bp->link_params.ext_phy_config);
7685                         return;
7686                 }
7687
7688                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7689                                            port*0x10);
7690                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7691                 break;
7692
7693         case SWITCH_CFG_10G:
7694                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7695
7696                 ext_phy_type =
7697                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7698                 switch (ext_phy_type) {
7699                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7700                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7701                                        ext_phy_type);
7702
7703                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7704                                                SUPPORTED_10baseT_Full |
7705                                                SUPPORTED_100baseT_Half |
7706                                                SUPPORTED_100baseT_Full |
7707                                                SUPPORTED_1000baseT_Full |
7708                                                SUPPORTED_2500baseX_Full |
7709                                                SUPPORTED_10000baseT_Full |
7710                                                SUPPORTED_TP |
7711                                                SUPPORTED_FIBRE |
7712                                                SUPPORTED_Autoneg |
7713                                                SUPPORTED_Pause |
7714                                                SUPPORTED_Asym_Pause);
7715                         break;
7716
7717                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7718                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7719                                        ext_phy_type);
7720
7721                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7722                                                SUPPORTED_1000baseT_Full |
7723                                                SUPPORTED_FIBRE |
7724                                                SUPPORTED_Autoneg |
7725                                                SUPPORTED_Pause |
7726                                                SUPPORTED_Asym_Pause);
7727                         break;
7728
7729                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7730                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7731                                        ext_phy_type);
7732
7733                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7734                                                SUPPORTED_2500baseX_Full |
7735                                                SUPPORTED_1000baseT_Full |
7736                                                SUPPORTED_FIBRE |
7737                                                SUPPORTED_Autoneg |
7738                                                SUPPORTED_Pause |
7739                                                SUPPORTED_Asym_Pause);
7740                         break;
7741
7742                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7743                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7744                                        ext_phy_type);
7745
7746                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7747                                                SUPPORTED_FIBRE |
7748                                                SUPPORTED_Pause |
7749                                                SUPPORTED_Asym_Pause);
7750                         break;
7751
7752                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7753                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7754                                        ext_phy_type);
7755
7756                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7757                                                SUPPORTED_1000baseT_Full |
7758                                                SUPPORTED_FIBRE |
7759                                                SUPPORTED_Pause |
7760                                                SUPPORTED_Asym_Pause);
7761                         break;
7762
7763                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7764                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7765                                        ext_phy_type);
7766
7767                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7768                                                SUPPORTED_1000baseT_Full |
7769                                                SUPPORTED_Autoneg |
7770                                                SUPPORTED_FIBRE |
7771                                                SUPPORTED_Pause |
7772                                                SUPPORTED_Asym_Pause);
7773                         break;
7774
7775                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7776                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7777                                        ext_phy_type);
7778
7779                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7780                                                SUPPORTED_TP |
7781                                                SUPPORTED_Autoneg |
7782                                                SUPPORTED_Pause |
7783                                                SUPPORTED_Asym_Pause);
7784                         break;
7785
7786                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7787                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7788                                        ext_phy_type);
7789
7790                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7791                                                SUPPORTED_10baseT_Full |
7792                                                SUPPORTED_100baseT_Half |
7793                                                SUPPORTED_100baseT_Full |
7794                                                SUPPORTED_1000baseT_Full |
7795                                                SUPPORTED_10000baseT_Full |
7796                                                SUPPORTED_TP |
7797                                                SUPPORTED_Autoneg |
7798                                                SUPPORTED_Pause |
7799                                                SUPPORTED_Asym_Pause);
7800                         break;
7801
7802                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7803                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7804                                   bp->link_params.ext_phy_config);
7805                         break;
7806
7807                 default:
7808                         BNX2X_ERR("NVRAM config error. "
7809                                   "BAD XGXS ext_phy_config 0x%x\n",
7810                                   bp->link_params.ext_phy_config);
7811                         return;
7812                 }
7813
7814                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7815                                            port*0x18);
7816                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7817
7818                 break;
7819
7820         default:
7821                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7822                           bp->port.link_config);
7823                 return;
7824         }
7825         bp->link_params.phy_addr = bp->port.phy_addr;
7826
7827         /* mask what we support according to speed_cap_mask */
7828         if (!(bp->link_params.speed_cap_mask &
7829                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7830                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7831
7832         if (!(bp->link_params.speed_cap_mask &
7833                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7834                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7835
7836         if (!(bp->link_params.speed_cap_mask &
7837                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7838                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7839
7840         if (!(bp->link_params.speed_cap_mask &
7841                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7842                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7843
7844         if (!(bp->link_params.speed_cap_mask &
7845                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7846                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7847                                         SUPPORTED_1000baseT_Full);
7848
7849         if (!(bp->link_params.speed_cap_mask &
7850                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7851                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7852
7853         if (!(bp->link_params.speed_cap_mask &
7854                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7855                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7856
7857         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7858 }
7859
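/* Translate the NVRAM link_config into the requested line speed,
 * duplex and advertised modes, validating each forced speed against
 * bp->port.supported and falling back to autoneg on an invalid
 * configuration.
 */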
7860 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7861 {
7862         bp->link_params.req_duplex = DUPLEX_FULL;
7863
7864         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7865         case PORT_FEATURE_LINK_SPEED_AUTO:
7866                 if (bp->port.supported & SUPPORTED_Autoneg) {
7867                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7868                         bp->port.advertising = bp->port.supported;
7869                 } else {
7870                         u32 ext_phy_type =
7871                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7872
7873                         if ((ext_phy_type ==
7874                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7875                             (ext_phy_type ==
7876                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7877                                 /* force 10G, no AN */
7878                                 bp->link_params.req_line_speed = SPEED_10000;
7879                                 bp->port.advertising =
7880                                                 (ADVERTISED_10000baseT_Full |
7881                                                  ADVERTISED_FIBRE);
7882                                 break;
7883                         }
7884                         BNX2X_ERR("NVRAM config error. "
7885                                   "Invalid link_config 0x%x"
7886                                   "  Autoneg not supported\n",
7887                                   bp->port.link_config);
7888                         return;
7889                 }
7890                 break;
7891
7892         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7893                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7894                         bp->link_params.req_line_speed = SPEED_10;
7895                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7896                                                 ADVERTISED_TP);
7897                 } else {
7898                         BNX2X_ERR("NVRAM config error. "
7899                                   "Invalid link_config 0x%x"
7900                                   "  speed_cap_mask 0x%x\n",
7901                                   bp->port.link_config,
7902                                   bp->link_params.speed_cap_mask);
7903                         return;
7904                 }
7905                 break;
7906
7907         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7908                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7909                         bp->link_params.req_line_speed = SPEED_10;
7910                         bp->link_params.req_duplex = DUPLEX_HALF;
7911                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7912                                                 ADVERTISED_TP);
7913                 } else {
7914                         BNX2X_ERR("NVRAM config error. "
7915                                   "Invalid link_config 0x%x"
7916                                   "  speed_cap_mask 0x%x\n",
7917                                   bp->port.link_config,
7918                                   bp->link_params.speed_cap_mask);
7919                         return;
7920                 }
7921                 break;
7922
7923         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7924                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7925                         bp->link_params.req_line_speed = SPEED_100;
7926                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7927                                                 ADVERTISED_TP);
7928                 } else {
7929                         BNX2X_ERR("NVRAM config error. "
7930                                   "Invalid link_config 0x%x"
7931                                   "  speed_cap_mask 0x%x\n",
7932                                   bp->port.link_config,
7933                                   bp->link_params.speed_cap_mask);
7934                         return;
7935                 }
7936                 break;
7937
7938         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7939                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7940                         bp->link_params.req_line_speed = SPEED_100;
7941                         bp->link_params.req_duplex = DUPLEX_HALF;
7942                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7943                                                 ADVERTISED_TP);
7944                 } else {
7945                         BNX2X_ERR("NVRAM config error. "
7946                                   "Invalid link_config 0x%x"
7947                                   "  speed_cap_mask 0x%x\n",
7948                                   bp->port.link_config,
7949                                   bp->link_params.speed_cap_mask);
7950                         return;
7951                 }
7952                 break;
7953
7954         case PORT_FEATURE_LINK_SPEED_1G:
7955                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7956                         bp->link_params.req_line_speed = SPEED_1000;
7957                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7958                                                 ADVERTISED_TP);
7959                 } else {
7960                         BNX2X_ERR("NVRAM config error. "
7961                                   "Invalid link_config 0x%x"
7962                                   "  speed_cap_mask 0x%x\n",
7963                                   bp->port.link_config,
7964                                   bp->link_params.speed_cap_mask);
7965                         return;
7966                 }
7967                 break;
7968
7969         case PORT_FEATURE_LINK_SPEED_2_5G:
7970                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7971                         bp->link_params.req_line_speed = SPEED_2500;
7972                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7973                                                 ADVERTISED_TP);
7974                 } else {
7975                         BNX2X_ERR("NVRAM config error. "
7976                                   "Invalid link_config 0x%x"
7977                                   "  speed_cap_mask 0x%x\n",
7978                                   bp->port.link_config,
7979                                   bp->link_params.speed_cap_mask);
7980                         return;
7981                 }
7982                 break;
7983
7984         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7985         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7986         case PORT_FEATURE_LINK_SPEED_10G_KR:
7987                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7988                         bp->link_params.req_line_speed = SPEED_10000;
7989                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7990                                                 ADVERTISED_FIBRE);
7991                 } else {
7992                         BNX2X_ERR("NVRAM config error. "
7993                                   "Invalid link_config 0x%x"
7994                                   "  speed_cap_mask 0x%x\n",
7995                                   bp->port.link_config,
7996                                   bp->link_params.speed_cap_mask);
7997                         return;
7998                 }
7999                 break;
8000
8001         default:
8002                 BNX2X_ERR("NVRAM config error. "
8003                           "BAD link speed link_config 0x%x\n",
8004                           bp->port.link_config);
8005                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8006                 bp->port.advertising = bp->port.supported;
8007                 break;
8008         }
8009
8010         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8011                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8012         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8013             !(bp->port.supported & SUPPORTED_Autoneg))
8014                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8015
8016         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8017                        "  advertising 0x%x\n",
8018                        bp->link_params.req_line_speed,
8019                        bp->link_params.req_duplex,
8020                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8021 }
8022
8023 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8024 {
8025         int port = BP_PORT(bp);
8026         u32 val, val2;
8027         u32 config;
8028         u16 i;
8029
8030         bp->link_params.bp = bp;
8031         bp->link_params.port = port;
8032
8033         bp->link_params.lane_config =
8034                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8035         bp->link_params.ext_phy_config =
8036                 SHMEM_RD(bp,
8037                          dev_info.port_hw_config[port].external_phy_config);
8038         bp->link_params.speed_cap_mask =
8039                 SHMEM_RD(bp,
8040                          dev_info.port_hw_config[port].speed_capability_mask);
8041
8042         bp->port.link_config =
8043                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8044
8045         /* Get the 4-lane XGXS rx and tx config */
8046         for (i = 0; i < 2; i++) {
8047                 val = SHMEM_RD(bp,
8048                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8049                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8050                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8051
8052                 val = SHMEM_RD(bp,
8053                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8054                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8055                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8056         }
8057
8058         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8059         if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8060                 bp->link_params.feature_config_flags |=
8061                                 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8062         else
8063                 bp->link_params.feature_config_flags &=
8064                                 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8065
8066         /* If the device is capable of WoL, set the default state according
8067          * to the HW
8068          */
8069         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8070                    (config & PORT_FEATURE_WOL_ENABLED));
8071
8072         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8073                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8074                        bp->link_params.lane_config,
8075                        bp->link_params.ext_phy_config,
8076                        bp->link_params.speed_cap_mask, bp->port.link_config);
8077
8078         bp->link_params.switch_cfg = (bp->port.link_config &
8079                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8080         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8081
8082         bnx2x_link_settings_requested(bp);
8083
8084         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8085         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8086         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8087         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8088         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8089         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8090         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8091         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8092         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8093         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8094 }
8095
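/*
 * Worked example (illustrative only): the port MAC address is split across
 * two shmem words, mac_upper holding bytes 0-1 in its low 16 bits and
 * mac_lower holding bytes 2-5.  With hypothetical values
 *
 *	val2 = 0x0000001a;	(mac_upper)
 *	val  = 0x2b3c4d5e;	(mac_lower)
 *
 * the byte extraction above yields dev_addr = 00:1a:2b:3c:4d:5e.
 */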
8096 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8097 {
8098         int func = BP_FUNC(bp);
8099         u32 val, val2;
8100         int rc = 0;
8101
8102         bnx2x_get_common_hwinfo(bp);
8103
8104         bp->e1hov = 0;
8105         bp->e1hmf = 0;
8106         if (CHIP_IS_E1H(bp)) {
8107                 bp->mf_config =
8108                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8109
8110                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8111                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8112                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8113
8114                         bp->e1hov = val;
8115                         bp->e1hmf = 1;
8116                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8117                                        "(0x%04x)\n",
8118                                        func, bp->e1hov, bp->e1hov);
8119                 } else {
8120                         BNX2X_DEV_INFO("single function mode\n");
8121                         if (BP_E1HVN(bp)) {
8122                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8123                                           "  aborting\n", func);
8124                                 rc = -EPERM;
8125                         }
8126                 }
8127         }
8128
8129         if (!BP_NOMCP(bp)) {
8130                 bnx2x_get_port_hwinfo(bp);
8131
8132                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8133                               DRV_MSG_SEQ_NUMBER_MASK);
8134                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8135         }
8136
8137         if (IS_E1HMF(bp)) {
8138                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8139                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8140                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8141                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8142                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8143                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8144                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8145                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8146                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8147                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8148                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8149                                ETH_ALEN);
8150                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8151                                ETH_ALEN);
8152                 }
8153
8154                 return rc;
8155         }
8156
8157         if (BP_NOMCP(bp)) {
8158                 /* only supposed to happen on emulation/FPGA */
8159                 BNX2X_ERR("warning: random MAC workaround active\n");
8160                 random_ether_addr(bp->dev->dev_addr);
8161                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8162         }
8163
8164         return rc;
8165 }
8166
8167 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8168 {
8169         int func = BP_FUNC(bp);
8170         int timer_interval;
8171         int rc;
8172
8173         /* Disable interrupt handling until HW is initialized */
8174         atomic_set(&bp->intr_sem, 1);
8175
8176         mutex_init(&bp->port.phy_mutex);
8177
8178         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8179         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8180
8181         rc = bnx2x_get_hwinfo(bp);
8182
8183         /* need to reset chip if undi was active */
8184         if (!BP_NOMCP(bp))
8185                 bnx2x_undi_unload(bp);
8186
8187         if (CHIP_REV_IS_FPGA(bp))
8188                 printk(KERN_ERR PFX "FPGA detected\n");
8189
8190         if (BP_NOMCP(bp) && (func == 0))
8191                 printk(KERN_ERR PFX
8192                        "MCP disabled, must load devices in order!\n");
8193
8194         /* Set multi queue mode */
8195         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8196             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8197                 printk(KERN_ERR PFX
8198                       "Multi disabled since int_mode requested is not MSI-X\n");
8199                 multi_mode = ETH_RSS_MODE_DISABLED;
8200         }
8201         bp->multi_mode = multi_mode;
8202
8203
8204         /* Set TPA flags */
8205         if (disable_tpa) {
8206                 bp->flags &= ~TPA_ENABLE_FLAG;
8207                 bp->dev->features &= ~NETIF_F_LRO;
8208         } else {
8209                 bp->flags |= TPA_ENABLE_FLAG;
8210                 bp->dev->features |= NETIF_F_LRO;
8211         }
8212
8213         bp->mrrs = mrrs;
8214
8215         bp->tx_ring_size = MAX_TX_AVAIL;
8216         bp->rx_ring_size = MAX_RX_AVAIL;
8217
8218         bp->rx_csum = 1;
8219
8220         bp->tx_ticks = 50;
8221         bp->rx_ticks = 25;
8222
8223         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8224         bp->current_interval = (poll ? poll : timer_interval);
8225
8226         init_timer(&bp->timer);
8227         bp->timer.expires = jiffies + bp->current_interval;
8228         bp->timer.data = (unsigned long) bp;
8229         bp->timer.function = bnx2x_timer;
8230
8231         return rc;
8232 }
8233
8234 /*
8235  * ethtool service functions
8236  */
8237
8238 /* All ethtool functions called with rtnl_lock */
8239
8240 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8241 {
8242         struct bnx2x *bp = netdev_priv(dev);
8243
8244         cmd->supported = bp->port.supported;
8245         cmd->advertising = bp->port.advertising;
8246
8247         if (netif_carrier_ok(dev)) {
8248                 cmd->speed = bp->link_vars.line_speed;
8249                 cmd->duplex = bp->link_vars.duplex;
8250         } else {
8251                 cmd->speed = bp->link_params.req_line_speed;
8252                 cmd->duplex = bp->link_params.req_duplex;
8253         }
8254         if (IS_E1HMF(bp)) {
8255                 u16 vn_max_rate;
8256
8257                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8258                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8259                 if (vn_max_rate < cmd->speed)
8260                         cmd->speed = vn_max_rate;
8261         }
8262
8263         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8264                 u32 ext_phy_type =
8265                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8266
8267                 switch (ext_phy_type) {
8268                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8269                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8270                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8271                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8272                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8273                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8274                         cmd->port = PORT_FIBRE;
8275                         break;
8276
8277                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8278                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8279                         cmd->port = PORT_TP;
8280                         break;
8281
8282                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8283                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8284                                   bp->link_params.ext_phy_config);
8285                         break;
8286
8287                 default:
8288                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8289                            bp->link_params.ext_phy_config);
8290                         break;
8291                 }
8292         } else
8293                 cmd->port = PORT_TP;
8294
8295         cmd->phy_address = bp->port.phy_addr;
8296         cmd->transceiver = XCVR_INTERNAL;
8297
8298         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8299                 cmd->autoneg = AUTONEG_ENABLE;
8300         else
8301                 cmd->autoneg = AUTONEG_DISABLE;
8302
8303         cmd->maxtxpkt = 0;
8304         cmd->maxrxpkt = 0;
8305
8306         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8307            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8308            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8309            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8310            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8311            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8312            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8313
8314         return 0;
8315 }
8316
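/*
 * Worked example (illustrative only): in E1H multi-function mode the speed
 * reported above is capped by the per-VN bandwidth from mf_config.  The
 * MAX_BW field is in units of 100 Mbps, so a hypothetical field value of
 * 25 gives
 *
 *	vn_max_rate = 25 * 100 = 2500;	(2.5 Gbps)
 *
 * and a 10G link would be reported to ethtool as 2500 Mbps.
 */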
8317 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8318 {
8319         struct bnx2x *bp = netdev_priv(dev);
8320         u32 advertising;
8321
8322         if (IS_E1HMF(bp))
8323                 return 0;
8324
8325         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8326            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8327            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8328            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8329            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8330            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8331            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8332
8333         if (cmd->autoneg == AUTONEG_ENABLE) {
8334                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8335                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8336                         return -EINVAL;
8337                 }
8338
8339                 /* advertise the requested speed and duplex if supported */
8340                 cmd->advertising &= bp->port.supported;
8341
8342                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8343                 bp->link_params.req_duplex = DUPLEX_FULL;
8344                 bp->port.advertising |= (ADVERTISED_Autoneg |
8345                                          cmd->advertising);
8346
8347         } else { /* forced speed */
8348                 /* advertise the requested speed and duplex if supported */
8349                 switch (cmd->speed) {
8350                 case SPEED_10:
8351                         if (cmd->duplex == DUPLEX_FULL) {
8352                                 if (!(bp->port.supported &
8353                                       SUPPORTED_10baseT_Full)) {
8354                                         DP(NETIF_MSG_LINK,
8355                                            "10M full not supported\n");
8356                                         return -EINVAL;
8357                                 }
8358
8359                                 advertising = (ADVERTISED_10baseT_Full |
8360                                                ADVERTISED_TP);
8361                         } else {
8362                                 if (!(bp->port.supported &
8363                                       SUPPORTED_10baseT_Half)) {
8364                                         DP(NETIF_MSG_LINK,
8365                                            "10M half not supported\n");
8366                                         return -EINVAL;
8367                                 }
8368
8369                                 advertising = (ADVERTISED_10baseT_Half |
8370                                                ADVERTISED_TP);
8371                         }
8372                         break;
8373
8374                 case SPEED_100:
8375                         if (cmd->duplex == DUPLEX_FULL) {
8376                                 if (!(bp->port.supported &
8377                                                 SUPPORTED_100baseT_Full)) {
8378                                         DP(NETIF_MSG_LINK,
8379                                            "100M full not supported\n");
8380                                         return -EINVAL;
8381                                 }
8382
8383                                 advertising = (ADVERTISED_100baseT_Full |
8384                                                ADVERTISED_TP);
8385                         } else {
8386                                 if (!(bp->port.supported &
8387                                                 SUPPORTED_100baseT_Half)) {
8388                                         DP(NETIF_MSG_LINK,
8389                                            "100M half not supported\n");
8390                                         return -EINVAL;
8391                                 }
8392
8393                                 advertising = (ADVERTISED_100baseT_Half |
8394                                                ADVERTISED_TP);
8395                         }
8396                         break;
8397
8398                 case SPEED_1000:
8399                         if (cmd->duplex != DUPLEX_FULL) {
8400                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8401                                 return -EINVAL;
8402                         }
8403
8404                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8405                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8406                                 return -EINVAL;
8407                         }
8408
8409                         advertising = (ADVERTISED_1000baseT_Full |
8410                                        ADVERTISED_TP);
8411                         break;
8412
8413                 case SPEED_2500:
8414                         if (cmd->duplex != DUPLEX_FULL) {
8415                                 DP(NETIF_MSG_LINK,
8416                                    "2.5G half not supported\n");
8417                                 return -EINVAL;
8418                         }
8419
8420                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8421                                 DP(NETIF_MSG_LINK,
8422                                    "2.5G full not supported\n");
8423                                 return -EINVAL;
8424                         }
8425
8426                         advertising = (ADVERTISED_2500baseX_Full |
8427                                        ADVERTISED_TP);
8428                         break;
8429
8430                 case SPEED_10000:
8431                         if (cmd->duplex != DUPLEX_FULL) {
8432                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8433                                 return -EINVAL;
8434                         }
8435
8436                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8437                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8438                                 return -EINVAL;
8439                         }
8440
8441                         advertising = (ADVERTISED_10000baseT_Full |
8442                                        ADVERTISED_FIBRE);
8443                         break;
8444
8445                 default:
8446                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8447                         return -EINVAL;
8448                 }
8449
8450                 bp->link_params.req_line_speed = cmd->speed;
8451                 bp->link_params.req_duplex = cmd->duplex;
8452                 bp->port.advertising = advertising;
8453         }
8454
8455         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8456            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8457            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8458            bp->port.advertising);
8459
8460         if (netif_running(dev)) {
8461                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8462                 bnx2x_link_set(bp);
8463         }
8464
8465         return 0;
8466 }
8467
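/*
 * Usage sketch (not part of the driver): the handler above is reached via
 * the standard ethtool ioctl path, e.g. from user space
 *
 *	ethtool -s eth0 autoneg on
 *	ethtool -s eth0 speed 1000 duplex full autoneg off
 *
 * where eth0 is a hypothetical interface name; the second form exercises
 * the forced-speed branch (SPEED_1000/DUPLEX_FULL) above.
 */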
8468 #define PHY_FW_VER_LEN                  10
8469
8470 static void bnx2x_get_drvinfo(struct net_device *dev,
8471                               struct ethtool_drvinfo *info)
8472 {
8473         struct bnx2x *bp = netdev_priv(dev);
8474         u8 phy_fw_ver[PHY_FW_VER_LEN];
8475
8476         strcpy(info->driver, DRV_MODULE_NAME);
8477         strcpy(info->version, DRV_MODULE_VERSION);
8478
8479         phy_fw_ver[0] = '\0';
8480         if (bp->port.pmf) {
8481                 bnx2x_acquire_phy_lock(bp);
8482                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8483                                              (bp->state != BNX2X_STATE_CLOSED),
8484                                              phy_fw_ver, PHY_FW_VER_LEN);
8485                 bnx2x_release_phy_lock(bp);
8486         }
8487
8488         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8489                  (bp->common.bc_ver & 0xff0000) >> 16,
8490                  (bp->common.bc_ver & 0xff00) >> 8,
8491                  (bp->common.bc_ver & 0xff),
8492                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8493         strcpy(info->bus_info, pci_name(bp->pdev));
8494         info->n_stats = BNX2X_NUM_STATS;
8495         info->testinfo_len = BNX2X_NUM_TESTS;
8496         info->eedump_len = bp->common.flash_size;
8497         info->regdump_len = 0;
8498 }
8499
8500 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8501 {
8502         struct bnx2x *bp = netdev_priv(dev);
8503
8504         if (bp->flags & NO_WOL_FLAG) {
8505                 wol->supported = 0;
8506                 wol->wolopts = 0;
8507         } else {
8508                 wol->supported = WAKE_MAGIC;
8509                 if (bp->wol)
8510                         wol->wolopts = WAKE_MAGIC;
8511                 else
8512                         wol->wolopts = 0;
8513         }
8514         memset(&wol->sopass, 0, sizeof(wol->sopass));
8515 }
8516
8517 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8518 {
8519         struct bnx2x *bp = netdev_priv(dev);
8520
8521         if (wol->wolopts & ~WAKE_MAGIC)
8522                 return -EINVAL;
8523
8524         if (wol->wolopts & WAKE_MAGIC) {
8525                 if (bp->flags & NO_WOL_FLAG)
8526                         return -EINVAL;
8527
8528                 bp->wol = 1;
8529         } else
8530                 bp->wol = 0;
8531
8532         return 0;
8533 }
8534
8535 static u32 bnx2x_get_msglevel(struct net_device *dev)
8536 {
8537         struct bnx2x *bp = netdev_priv(dev);
8538
8539         return bp->msglevel;
8540 }
8541
8542 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8543 {
8544         struct bnx2x *bp = netdev_priv(dev);
8545
8546         if (capable(CAP_NET_ADMIN))
8547                 bp->msglevel = level;
8548 }
8549
8550 static int bnx2x_nway_reset(struct net_device *dev)
8551 {
8552         struct bnx2x *bp = netdev_priv(dev);
8553
8554         if (!bp->port.pmf)
8555                 return 0;
8556
8557         if (netif_running(dev)) {
8558                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8559                 bnx2x_link_set(bp);
8560         }
8561
8562         return 0;
8563 }
8564
8565 static int bnx2x_get_eeprom_len(struct net_device *dev)
8566 {
8567         struct bnx2x *bp = netdev_priv(dev);
8568
8569         return bp->common.flash_size;
8570 }
8571
8572 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8573 {
8574         int port = BP_PORT(bp);
8575         int count, i;
8576         u32 val = 0;
8577
8578         /* adjust timeout for emulation/FPGA */
8579         count = NVRAM_TIMEOUT_COUNT;
8580         if (CHIP_REV_IS_SLOW(bp))
8581                 count *= 100;
8582
8583         /* request access to nvram interface */
8584         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8585                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8586
8587         for (i = 0; i < count*10; i++) {
8588                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8589                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8590                         break;
8591
8592                 udelay(5);
8593         }
8594
8595         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8596                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8597                 return -EBUSY;
8598         }
8599
8600         return 0;
8601 }
8602
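/*
 * Note on the timeout above: the arbiter grant is polled count*10 times
 * with a 5 usec delay per iteration, i.e. roughly NVRAM_TIMEOUT_COUNT * 50
 * usec on real silicon and 100 times that on emulation/FPGA.
 */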
8603 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8604 {
8605         int port = BP_PORT(bp);
8606         int count, i;
8607         u32 val = 0;
8608
8609         /* adjust timeout for emulation/FPGA */
8610         count = NVRAM_TIMEOUT_COUNT;
8611         if (CHIP_REV_IS_SLOW(bp))
8612                 count *= 100;
8613
8614         /* relinquish nvram interface */
8615         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8616                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8617
8618         for (i = 0; i < count*10; i++) {
8619                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8620                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8621                         break;
8622
8623                 udelay(5);
8624         }
8625
8626         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8627                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8628                 return -EBUSY;
8629         }
8630
8631         return 0;
8632 }
8633
8634 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8635 {
8636         u32 val;
8637
8638         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8639
8640         /* enable both bits, even on read */
8641         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8642                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8643                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8644 }
8645
8646 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8647 {
8648         u32 val;
8649
8650         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8651
8652         /* disable both bits, even after read */
8653         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8654                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8655                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8656 }
8657
8658 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8659                                   u32 cmd_flags)
8660 {
8661         int count, i, rc;
8662         u32 val;
8663
8664         /* build the command word */
8665         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8666
8667         /* need to clear DONE bit separately */
8668         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8669
8670         /* address of the NVRAM to read from */
8671         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8672                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8673
8674         /* issue a read command */
8675         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8676
8677         /* adjust timeout for emulation/FPGA */
8678         count = NVRAM_TIMEOUT_COUNT;
8679         if (CHIP_REV_IS_SLOW(bp))
8680                 count *= 100;
8681
8682         /* wait for completion */
8683         *ret_val = 0;
8684         rc = -EBUSY;
8685         for (i = 0; i < count; i++) {
8686                 udelay(5);
8687                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8688
8689                 if (val & MCPR_NVM_COMMAND_DONE) {
8690                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8691                         /* we read nvram data in cpu order
8692                          * but ethtool sees it as an array of bytes;
8693                          * converting to big-endian does the work */
8694                         *ret_val = cpu_to_be32(val);
8695                         rc = 0;
8696                         break;
8697                 }
8698         }
8699
8700         return rc;
8701 }
8702
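/*
 * Endianness sketch (illustrative only): if the NVRAM dword arrives from
 * MCP_REG_MCPR_NVM_READ as val = 0x12345678 in cpu order, then
 *
 *	*ret_val = cpu_to_be32(0x12345678);
 *
 * stores the bytes 0x12 0x34 0x56 0x78 in memory on any host, which is
 * the byte-array view that ethtool expects.
 */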
8703 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8704                             int buf_size)
8705 {
8706         int rc;
8707         u32 cmd_flags;
8708         __be32 val;
8709
8710         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8711                 DP(BNX2X_MSG_NVM,
8712                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8713                    offset, buf_size);
8714                 return -EINVAL;
8715         }
8716
8717         if (offset + buf_size > bp->common.flash_size) {
8718                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8719                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8720                    offset, buf_size, bp->common.flash_size);
8721                 return -EINVAL;
8722         }
8723
8724         /* request access to nvram interface */
8725         rc = bnx2x_acquire_nvram_lock(bp);
8726         if (rc)
8727                 return rc;
8728
8729         /* enable access to nvram interface */
8730         bnx2x_enable_nvram_access(bp);
8731
8732         /* read the first word(s) */
8733         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8734         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8735                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8736                 memcpy(ret_buf, &val, 4);
8737
8738                 /* advance to the next dword */
8739                 offset += sizeof(u32);
8740                 ret_buf += sizeof(u32);
8741                 buf_size -= sizeof(u32);
8742                 cmd_flags = 0;
8743         }
8744
8745         if (rc == 0) {
8746                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8747                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8748                 memcpy(ret_buf, &val, 4);
8749         }
8750
8751         /* disable access to nvram interface */
8752         bnx2x_disable_nvram_access(bp);
8753         bnx2x_release_nvram_lock(bp);
8754
8755         return rc;
8756 }
8757
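/*
 * Usage sketch (illustrative only): reading the first 64 bytes of flash
 * through the helper above; offset and size must be dword aligned.
 *
 *	u8 buf[64];
 *	int rc = bnx2x_nvram_read(bp, 0, buf, sizeof(buf));
 *	if (rc)
 *		DP(BNX2X_MSG_NVM, "nvram read failed: %d\n", rc);
 *
 * where bp is an already initialized struct bnx2x instance.
 */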
8758 static int bnx2x_get_eeprom(struct net_device *dev,
8759                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8760 {
8761         struct bnx2x *bp = netdev_priv(dev);
8762         int rc;
8763
8764         if (!netif_running(dev))
8765                 return -EAGAIN;
8766
8767         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8768            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8769            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8770            eeprom->len, eeprom->len);
8771
8772         /* parameters already validated in ethtool_get_eeprom */
8773
8774         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8775
8776         return rc;
8777 }
8778
8779 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8780                                    u32 cmd_flags)
8781 {
8782         int count, i, rc;
8783
8784         /* build the command word */
8785         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8786
8787         /* need to clear DONE bit separately */
8788         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8789
8790         /* write the data */
8791         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8792
8793         /* address of the NVRAM to write to */
8794         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8795                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8796
8797         /* issue the write command */
8798         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8799
8800         /* adjust timeout for emulation/FPGA */
8801         count = NVRAM_TIMEOUT_COUNT;
8802         if (CHIP_REV_IS_SLOW(bp))
8803                 count *= 100;
8804
8805         /* wait for completion */
8806         rc = -EBUSY;
8807         for (i = 0; i < count; i++) {
8808                 udelay(5);
8809                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8810                 if (val & MCPR_NVM_COMMAND_DONE) {
8811                         rc = 0;
8812                         break;
8813                 }
8814         }
8815
8816         return rc;
8817 }
8818
8819 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8820
8821 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8822                               int buf_size)
8823 {
8824         int rc;
8825         u32 cmd_flags;
8826         u32 align_offset;
8827         __be32 val;
8828
8829         if (offset + buf_size > bp->common.flash_size) {
8830                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8831                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8832                    offset, buf_size, bp->common.flash_size);
8833                 return -EINVAL;
8834         }
8835
8836         /* request access to nvram interface */
8837         rc = bnx2x_acquire_nvram_lock(bp);
8838         if (rc)
8839                 return rc;
8840
8841         /* enable access to nvram interface */
8842         bnx2x_enable_nvram_access(bp);
8843
8844         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8845         align_offset = (offset & ~0x03);
8846         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8847
8848         if (rc == 0) {
8849                 val &= ~(0xff << BYTE_OFFSET(offset));
8850                 val |= (*data_buf << BYTE_OFFSET(offset));
8851
8852                 /* nvram data is returned as an array of bytes;
8853                  * convert it back to cpu order */
8854                 val = be32_to_cpu(val);
8855
8856                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8857                                              cmd_flags);
8858         }
8859
8860         /* disable access to nvram interface */
8861         bnx2x_disable_nvram_access(bp);
8862         bnx2x_release_nvram_lock(bp);
8863
8864         return rc;
8865 }
8866
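/*
 * Worked example (illustrative only): patching the single byte at flash
 * offset 5 via the read-modify-write above.  align_offset = (5 & ~3) = 4
 * and BYTE_OFFSET(5) = 8 * (5 & 3) = 8, so the mask ~(0xff << 8) clears
 * bits 15:8 of the dword read from offset 4 before the new byte is or-ed
 * in.
 */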
8867 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8868                              int buf_size)
8869 {
8870         int rc;
8871         u32 cmd_flags;
8872         u32 val;
8873         u32 written_so_far;
8874
8875         if (buf_size == 1)      /* ethtool */
8876                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8877
8878         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8879                 DP(BNX2X_MSG_NVM,
8880                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8881                    offset, buf_size);
8882                 return -EINVAL;
8883         }
8884
8885         if (offset + buf_size > bp->common.flash_size) {
8886                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8887                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8888                    offset, buf_size, bp->common.flash_size);
8889                 return -EINVAL;
8890         }
8891
8892         /* request access to nvram interface */
8893         rc = bnx2x_acquire_nvram_lock(bp);
8894         if (rc)
8895                 return rc;
8896
8897         /* enable access to nvram interface */
8898         bnx2x_enable_nvram_access(bp);
8899
8900         written_so_far = 0;
8901         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8902         while ((written_so_far < buf_size) && (rc == 0)) {
8903                 if (written_so_far == (buf_size - sizeof(u32)))
8904                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8905                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8906                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8907                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8908                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8909
8910                 memcpy(&val, data_buf, 4);
8911
8912                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8913
8914                 /* advance to the next dword */
8915                 offset += sizeof(u32);
8916                 data_buf += sizeof(u32);
8917                 written_so_far += sizeof(u32);
8918                 cmd_flags = 0;
8919         }
8920
8921         /* disable access to nvram interface */
8922         bnx2x_disable_nvram_access(bp);
8923         bnx2x_release_nvram_lock(bp);
8924
8925         return rc;
8926 }
8927
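/*
 * Note on the FIRST/LAST flags above (assuming a page size of 256 bytes,
 * the usual NVRAM_PAGE_SIZE): a dword written at offset 0xfc satisfies
 * ((offset + 4) % NVRAM_PAGE_SIZE) == 0 and is flagged LAST, while the
 * following dword at offset 0x100 satisfies (offset % NVRAM_PAGE_SIZE) == 0
 * and starts a new burst with FIRST.
 */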
8928 static int bnx2x_set_eeprom(struct net_device *dev,
8929                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8930 {
8931         struct bnx2x *bp = netdev_priv(dev);
8932         int rc;
8933
8934         if (!netif_running(dev))
8935                 return -EAGAIN;
8936
8937         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8938            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8939            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8940            eeprom->len, eeprom->len);
8941
8942         /* parameters already validated in ethtool_set_eeprom */
8943
8944         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8945         if (eeprom->magic == 0x00504859)
8946                 if (bp->port.pmf) {
8947
8948                         bnx2x_acquire_phy_lock(bp);
8949                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8950                                              bp->link_params.ext_phy_config,
8951                                              (bp->state != BNX2X_STATE_CLOSED),
8952                                              eebuf, eeprom->len);
8953                         if ((bp->state == BNX2X_STATE_OPEN) ||
8954                             (bp->state == BNX2X_STATE_DISABLED)) {
8955                                 rc |= bnx2x_link_reset(&bp->link_params,
8956                                                        &bp->link_vars, 1);
8957                                 rc |= bnx2x_phy_init(&bp->link_params,
8958                                                      &bp->link_vars);
8959                         }
8960                         bnx2x_release_phy_lock(bp);
8961
8962                 } else /* Only the PMF can access the PHY */
8963                         return -EINVAL;
8964         else
8965                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8966
8967         return rc;
8968 }
8969
8970 static int bnx2x_get_coalesce(struct net_device *dev,
8971                               struct ethtool_coalesce *coal)
8972 {
8973         struct bnx2x *bp = netdev_priv(dev);
8974
8975         memset(coal, 0, sizeof(struct ethtool_coalesce));
8976
8977         coal->rx_coalesce_usecs = bp->rx_ticks;
8978         coal->tx_coalesce_usecs = bp->tx_ticks;
8979
8980         return 0;
8981 }
8982
8983 static int bnx2x_set_coalesce(struct net_device *dev,
8984                               struct ethtool_coalesce *coal)
8985 {
8986         struct bnx2x *bp = netdev_priv(dev);
8987
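        /* clamp the coalescing timeouts; note that the rx cap is 3000
         * (decimal) while the tx cap is 0x3000 (12288) in this version
         */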
8988         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8989         if (bp->rx_ticks > 3000)
8990                 bp->rx_ticks = 3000;
8991
8992         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8993         if (bp->tx_ticks > 0x3000)
8994                 bp->tx_ticks = 0x3000;
8995
8996         if (netif_running(dev))
8997                 bnx2x_update_coalesce(bp);
8998
8999         return 0;
9000 }
9001
9002 static void bnx2x_get_ringparam(struct net_device *dev,
9003                                 struct ethtool_ringparam *ering)
9004 {
9005         struct bnx2x *bp = netdev_priv(dev);
9006
9007         ering->rx_max_pending = MAX_RX_AVAIL;
9008         ering->rx_mini_max_pending = 0;
9009         ering->rx_jumbo_max_pending = 0;
9010
9011         ering->rx_pending = bp->rx_ring_size;
9012         ering->rx_mini_pending = 0;
9013         ering->rx_jumbo_pending = 0;
9014
9015         ering->tx_max_pending = MAX_TX_AVAIL;
9016         ering->tx_pending = bp->tx_ring_size;
9017 }
9018
9019 static int bnx2x_set_ringparam(struct net_device *dev,
9020                                struct ethtool_ringparam *ering)
9021 {
9022         struct bnx2x *bp = netdev_priv(dev);
9023         int rc = 0;
9024
9025         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9026             (ering->tx_pending > MAX_TX_AVAIL) ||
9027             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9028                 return -EINVAL;
9029
9030         bp->rx_ring_size = ering->rx_pending;
9031         bp->tx_ring_size = ering->tx_pending;
9032
9033         if (netif_running(dev)) {
9034                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9035                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9036         }
9037
9038         return rc;
9039 }
9040
9041 static void bnx2x_get_pauseparam(struct net_device *dev,
9042                                  struct ethtool_pauseparam *epause)
9043 {
9044         struct bnx2x *bp = netdev_priv(dev);
9045
9046         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9047                            BNX2X_FLOW_CTRL_AUTO) &&
9048                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9049
9050         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9051                             BNX2X_FLOW_CTRL_RX);
9052         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9053                             BNX2X_FLOW_CTRL_TX);
9054
9055         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9056            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9057            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9058 }
9059
9060 static int bnx2x_set_pauseparam(struct net_device *dev,
9061                                 struct ethtool_pauseparam *epause)
9062 {
9063         struct bnx2x *bp = netdev_priv(dev);
9064
9065         if (IS_E1HMF(bp))
9066                 return 0;
9067
9068         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9069            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9070            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9071
9072         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9073
9074         if (epause->rx_pause)
9075                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9076
9077         if (epause->tx_pause)
9078                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9079
9080         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9081                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9082
9083         if (epause->autoneg) {
9084                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9085                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9086                         return -EINVAL;
9087                 }
9088
9089                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9090                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9091         }
9092
9093         DP(NETIF_MSG_LINK,
9094            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9095
9096         if (netif_running(dev)) {
9097                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9098                 bnx2x_link_set(bp);
9099         }
9100
9101         return 0;
9102 }
9103
9104 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9105 {
9106         struct bnx2x *bp = netdev_priv(dev);
9107         int changed = 0;
9108         int rc = 0;
9109
9110         /* TPA requires Rx CSUM offloading */
9111         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9112                 if (!(dev->features & NETIF_F_LRO)) {
9113                         dev->features |= NETIF_F_LRO;
9114                         bp->flags |= TPA_ENABLE_FLAG;
9115                         changed = 1;
9116                 }
9117
9118         } else if (dev->features & NETIF_F_LRO) {
9119                 dev->features &= ~NETIF_F_LRO;
9120                 bp->flags &= ~TPA_ENABLE_FLAG;
9121                 changed = 1;
9122         }
9123
9124         if (changed && netif_running(dev)) {
9125                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9126                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9127         }
9128
9129         return rc;
9130 }
9131
9132 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9133 {
9134         struct bnx2x *bp = netdev_priv(dev);
9135
9136         return bp->rx_csum;
9137 }
9138
9139 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9140 {
9141         struct bnx2x *bp = netdev_priv(dev);
9142         int rc = 0;
9143
9144         bp->rx_csum = data;
9145
9146         /* Disable TPA when Rx CSUM is disabled; otherwise all
9147            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9148         if (!data) {
9149                 u32 flags = ethtool_op_get_flags(dev);
9150
9151                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9152         }
9153
9154         return rc;
9155 }
9156
9157 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9158 {
9159         if (data) {
9160                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9161                 dev->features |= NETIF_F_TSO6;
9162         } else {
9163                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9164                 dev->features &= ~NETIF_F_TSO6;
9165         }
9166
9167         return 0;
9168 }
9169
9170 static const struct {
9171         char string[ETH_GSTRING_LEN];
9172 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9173         { "register_test (offline)" },
9174         { "memory_test (offline)" },
9175         { "loopback_test (offline)" },
9176         { "nvram_test (online)" },
9177         { "interrupt_test (online)" },
9178         { "link_test (online)" },
9179         { "idle check (online)" }
9180 };
9181
9182 static int bnx2x_self_test_count(struct net_device *dev)
9183 {
9184         return BNX2X_NUM_TESTS;
9185 }
9186
9187 static int bnx2x_test_registers(struct bnx2x *bp)
9188 {
9189         int idx, i, rc = -ENODEV;
9190         u32 wr_val = 0;
9191         int port = BP_PORT(bp);
9192         static const struct {
9193                 u32  offset0;
9194                 u32  offset1;
9195                 u32  mask;
9196         } reg_tbl[] = {
9197 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9198                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9199                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9200                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9201                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9202                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9203                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9204                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9205                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9206                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9207 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9208                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9209                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9210                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9211                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9212                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9213                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9214                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9215                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9216                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9217 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9218                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9219                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9220                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9221                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9222                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9223                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9224                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9225                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9226                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9227 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9228                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9229                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9230                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9231                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9232                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9233                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9234                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9235
9236                 { 0xffffffff, 0, 0x00000000 }
9237         };
9238
9239         if (!netif_running(bp->dev))
9240                 return rc;
9241
9242         /* Run the test twice:
9243            first writing 0x00000000, then writing 0xffffffff */
9244         for (idx = 0; idx < 2; idx++) {
9245
9246                 switch (idx) {
9247                 case 0:
9248                         wr_val = 0;
9249                         break;
9250                 case 1:
9251                         wr_val = 0xffffffff;
9252                         break;
9253                 }
9254
9255                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9256                         u32 offset, mask, save_val, val;
9257
9258                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9259                         mask = reg_tbl[i].mask;
9260
9261                         save_val = REG_RD(bp, offset);
9262
9263                         REG_WR(bp, offset, wr_val);
9264                         val = REG_RD(bp, offset);
9265
9266                         /* Restore the original register's value */
9267                         REG_WR(bp, offset, save_val);
9268
9269                         /* verify that the value is as expected */
9270                         if ((val & mask) != (wr_val & mask))
9271                                 goto test_reg_exit;
9272                 }
9273         }
9274
9275         rc = 0;
9276
9277 test_reg_exit:
9278         return rc;
9279 }
9280
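/*
 * Worked example (illustrative only): each reg_tbl entry is tested at
 * offset0 + port*offset1, so offset1 is the per-port register stride.
 * E.g. NIG_REG_LLH0_DEST_MAC_0_0 with stride 160 is probed at
 * NIG_REG_LLH0_DEST_MAC_0_0 + 160 on port 1, first with 0x00000000 and
 * then with 0xffffffff, comparing the read-back against the written value
 * under the entry's mask.
 */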
9281 static int bnx2x_test_memory(struct bnx2x *bp)
9282 {
9283         int i, j, rc = -ENODEV;
9284         u32 val;
9285         static const struct {
9286                 u32 offset;
9287                 int size;
9288         } mem_tbl[] = {
9289                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9290                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9291                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9292                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9293                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9294                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9295                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9296
9297                 { 0xffffffff, 0 }
9298         };
9299         static const struct {
9300                 char *name;
9301                 u32 offset;
9302                 u32 e1_mask;
9303                 u32 e1h_mask;
9304         } prty_tbl[] = {
9305                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9306                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9307                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9308                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9309                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9310                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9311
9312                 { NULL, 0xffffffff, 0, 0 }
9313         };
9314
9315         if (!netif_running(bp->dev))
9316                 return rc;
9317
9318         /* Go through all the memories */
9319         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9320                 for (j = 0; j < mem_tbl[i].size; j++)
9321                         REG_RD(bp, mem_tbl[i].offset + j*4);
9322
9323         /* Check the parity status */
9324         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9325                 val = REG_RD(bp, prty_tbl[i].offset);
9326                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9327                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9328                         DP(NETIF_MSG_HW,
9329                            "%s is 0x%x\n", prty_tbl[i].name, val);
9330                         goto test_mem_exit;
9331                 }
9332         }
9333
9334         rc = 0;
9335
9336 test_mem_exit:
9337         return rc;
9338 }
9339
9340 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9341 {
9342         int cnt = 1000;
9343
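        /* poll bnx2x_link_test() every 10 ms, giving up after 1000
         * iterations, i.e. roughly 10 seconds
         */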
9344         if (link_up)
9345                 while (bnx2x_link_test(bp) && cnt--)
9346                         msleep(10);
9347 }
9348
9349 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9350 {
9351         unsigned int pkt_size, num_pkts, i;
9352         struct sk_buff *skb;
9353         unsigned char *packet;
9354         struct bnx2x_fastpath *fp = &bp->fp[0];
9355         u16 tx_start_idx, tx_idx;
9356         u16 rx_start_idx, rx_idx;
9357         u16 pkt_prod;
9358         struct sw_tx_bd *tx_buf;
9359         struct eth_tx_bd *tx_bd;
9360         dma_addr_t mapping;
9361         union eth_rx_cqe *cqe;
9362         u8 cqe_fp_flags;
9363         struct sw_rx_bd *rx_buf;
9364         u16 len;
9365         int rc = -ENODEV;
9366
9367         /* check the loopback mode */
9368         switch (loopback_mode) {
9369         case BNX2X_PHY_LOOPBACK:
9370                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9371                         return -EINVAL;
9372                 break;
9373         case BNX2X_MAC_LOOPBACK:
9374                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9375                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9376                 break;
9377         default:
9378                 return -EINVAL;
9379         }
9380
9381         /* prepare the loopback packet */
9382         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9383                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9384         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9385         if (!skb) {
9386                 rc = -ENOMEM;
9387                 goto test_loopback_exit;
9388         }
9389         packet = skb_put(skb, pkt_size);
9390         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9391         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9392         for (i = ETH_HLEN; i < pkt_size; i++)
9393                 packet[i] = (unsigned char) (i & 0xff);
9394
9395         /* send the loopback packet */
9396         num_pkts = 0;
9397         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9398         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9399
9400         pkt_prod = fp->tx_pkt_prod++;
9401         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9402         tx_buf->first_bd = fp->tx_bd_prod;
9403         tx_buf->skb = skb;
9404
9405         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9406         mapping = pci_map_single(bp->pdev, skb->data,
9407                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9408         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9409         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9410         tx_bd->nbd = cpu_to_le16(1);
9411         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9412         tx_bd->vlan = cpu_to_le16(pkt_prod);
9413         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9414                                        ETH_TX_BD_FLAGS_END_BD);
9415         tx_bd->general_data = ((UNICAST_ADDRESS <<
9416                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9417
9418         wmb();
9419
9420         le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9421         mb(); /* FW restriction: must not reorder writing nbd and packets */
9422         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9423         DOORBELL(bp, fp->index, 0);
9424
9425         mmiowb();
9426
9427         num_pkts++;
9428         fp->tx_bd_prod++;
9429         bp->dev->trans_start = jiffies;
9430
9431         udelay(100);
9432
9433         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9434         if (tx_idx != tx_start_idx + num_pkts)
9435                 goto test_loopback_exit;
9436
9437         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9438         if (rx_idx != rx_start_idx + num_pkts)
9439                 goto test_loopback_exit;
9440
9441         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9442         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9443         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9444                 goto test_loopback_rx_exit;
9445
9446         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9447         if (len != pkt_size)
9448                 goto test_loopback_rx_exit;
9449
9450         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9451         skb = rx_buf->skb;
9452         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9453         for (i = ETH_HLEN; i < pkt_size; i++)
9454                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9455                         goto test_loopback_rx_exit;
9456
9457         rc = 0;
9458
9459 test_loopback_rx_exit:
9460
9461         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9462         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9463         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9464         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9465
9466         /* Update producers */
9467         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9468                              fp->rx_sge_prod);
9469
9470 test_loopback_exit:
9471         bp->link_params.loopback_mode = LOOPBACK_NONE;
9472
9473         return rc;
9474 }
9475
9476 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9477 {
9478         int rc = 0, res;
9479
9480         if (!netif_running(bp->dev))
9481                 return BNX2X_LOOPBACK_FAILED;
9482
9483         bnx2x_netif_stop(bp, 1);
9484         bnx2x_acquire_phy_lock(bp);
9485
9486         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9487         if (res) {
9488                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9489                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9490         }
9491
9492         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9493         if (res) {
9494                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9495                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9496         }
9497
9498         bnx2x_release_phy_lock(bp);
9499         bnx2x_netif_start(bp);
9500
9501         return rc;
9502 }
9503
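/* Each NVRAM region listed in nvram_tbl below stores its CRC32 at its
 * end, so running ether_crc_le() over an intact region (data plus the
 * stored CRC) always yields this well-known CRC32 residual; the test
 * only has to compare against the constant.
 */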
9504 #define CRC32_RESIDUAL                  0xdebb20e3
9505
9506 static int bnx2x_test_nvram(struct bnx2x *bp)
9507 {
9508         static const struct {
9509                 int offset;
9510                 int size;
9511         } nvram_tbl[] = {
9512                 {     0,  0x14 }, /* bootstrap */
9513                 {  0x14,  0xec }, /* dir */
9514                 { 0x100, 0x350 }, /* manuf_info */
9515                 { 0x450,  0xf0 }, /* feature_info */
9516                 { 0x640,  0x64 }, /* upgrade_key_info */
9517                 { 0x6a4,  0x64 },
9518                 { 0x708,  0x70 }, /* manuf_key_info */
9519                 { 0x778,  0x70 },
9520                 {     0,     0 }
9521         };
9522         __be32 buf[0x350 / 4];
9523         u8 *data = (u8 *)buf;
9524         int i, rc;
9525         u32 magic, csum;
9526
9527         rc = bnx2x_nvram_read(bp, 0, data, 4);
9528         if (rc) {
9529                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9530                 goto test_nvram_exit;
9531         }
9532
9533         magic = be32_to_cpu(buf[0]);
9534         if (magic != 0x669955aa) {
9535                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9536                 rc = -ENODEV;
9537                 goto test_nvram_exit;
9538         }
9539
9540         for (i = 0; nvram_tbl[i].size; i++) {
9541
9542                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9543                                       nvram_tbl[i].size);
9544                 if (rc) {
9545                         DP(NETIF_MSG_PROBE,
9546                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9547                         goto test_nvram_exit;
9548                 }
9549
9550                 csum = ether_crc_le(nvram_tbl[i].size, data);
9551                 if (csum != CRC32_RESIDUAL) {
9552                         DP(NETIF_MSG_PROBE,
9553                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9554                         rc = -ENODEV;
9555                         goto test_nvram_exit;
9556                 }
9557         }
9558
9559 test_nvram_exit:
9560         return rc;
9561 }
9562
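/* Verify the slowpath interrupt path: post a harmless zero-length
 * SET_MAC ramrod and wait up to ~100 ms (10 x 10 ms) for the completion
 * interrupt to clear set_mac_pending; if it never clears, interrupts
 * are not being delivered.
 */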
9563 static int bnx2x_test_intr(struct bnx2x *bp)
9564 {
9565         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9566         int i, rc;
9567
9568         if (!netif_running(bp->dev))
9569                 return -ENODEV;
9570
9571         config->hdr.length = 0;
9572         if (CHIP_IS_E1(bp))
9573                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9574         else
9575                 config->hdr.offset = BP_FUNC(bp);
9576         config->hdr.client_id = bp->fp->cl_id;
9577         config->hdr.reserved1 = 0;
9578
9579         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9580                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9581                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9582         if (rc == 0) {
9583                 bp->set_mac_pending++;
9584                 for (i = 0; i < 10; i++) {
9585                         if (!bp->set_mac_pending)
9586                                 break;
9587                         msleep_interruptible(10);
9588                 }
9589                 if (i == 10)
9590                         rc = -ENODEV;
9591         }
9592
9593         return rc;
9594 }
9595
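/* ethtool self test entry point.  buf[] slots: 0 - registers, 1 - memory,
 * 2 - loopback, 3 - nvram, 4 - interrupt, 5 - link (PMF only).  The
 * offline tests (0-2) reload the NIC in diagnostic mode and are not
 * supported in E1H multi-function mode.
 */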
9596 static void bnx2x_self_test(struct net_device *dev,
9597                             struct ethtool_test *etest, u64 *buf)
9598 {
9599         struct bnx2x *bp = netdev_priv(dev);
9600
9601         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9602
9603         if (!netif_running(dev))
9604                 return;
9605
9606         /* offline tests are not supported in MF mode */
9607         if (IS_E1HMF(bp))
9608                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9609
9610         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9611                 u8 link_up;
9612
9613                 link_up = bp->link_vars.link_up;
9614                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9615                 bnx2x_nic_load(bp, LOAD_DIAG);
9616                 /* wait until link state is restored */
9617                 bnx2x_wait_for_link(bp, link_up);
9618
9619                 if (bnx2x_test_registers(bp) != 0) {
9620                         buf[0] = 1;
9621                         etest->flags |= ETH_TEST_FL_FAILED;
9622                 }
9623                 if (bnx2x_test_memory(bp) != 0) {
9624                         buf[1] = 1;
9625                         etest->flags |= ETH_TEST_FL_FAILED;
9626                 }
9627                 buf[2] = bnx2x_test_loopback(bp, link_up);
9628                 if (buf[2] != 0)
9629                         etest->flags |= ETH_TEST_FL_FAILED;
9630
9631                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9632                 bnx2x_nic_load(bp, LOAD_NORMAL);
9633                 /* wait until link state is restored */
9634                 bnx2x_wait_for_link(bp, link_up);
9635         }
9636         if (bnx2x_test_nvram(bp) != 0) {
9637                 buf[3] = 1;
9638                 etest->flags |= ETH_TEST_FL_FAILED;
9639         }
9640         if (bnx2x_test_intr(bp) != 0) {
9641                 buf[4] = 1;
9642                 etest->flags |= ETH_TEST_FL_FAILED;
9643         }
9644         if (bp->port.pmf)
9645                 if (bnx2x_link_test(bp) != 0) {
9646                         buf[5] = 1;
9647                         etest->flags |= ETH_TEST_FL_FAILED;
9648                 }
9649
9650 #ifdef BNX2X_EXTRA_DEBUG
9651         bnx2x_panic_dump(bp);
9652 #endif
9653 }
9654
9655 static const struct {
9656         long offset;
9657         int size;
9658         u8 string[ETH_GSTRING_LEN];
9659 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9660 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9661         { Q_STATS_OFFSET32(error_bytes_received_hi),
9662                                                 8, "[%d]: rx_error_bytes" },
9663         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9664                                                 8, "[%d]: rx_ucast_packets" },
9665         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9666                                                 8, "[%d]: rx_mcast_packets" },
9667         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9668                                                 8, "[%d]: rx_bcast_packets" },
9669         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9670         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9671                                          4, "[%d]: rx_phy_ip_err_discards"},
9672         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9673                                          4, "[%d]: rx_skb_alloc_discard" },
9674         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9675
9676 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9677         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9678                                                         8, "[%d]: tx_packets" }
9679 };
9680
9681 static const struct {
9682         long offset;
9683         int size;
9684         u32 flags;
9685 #define STATS_FLAGS_PORT                1
9686 #define STATS_FLAGS_FUNC                2
9687 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9688         u8 string[ETH_GSTRING_LEN];
9689 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9690 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9691                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9692         { STATS_OFFSET32(error_bytes_received_hi),
9693                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9694         { STATS_OFFSET32(total_unicast_packets_received_hi),
9695                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9696         { STATS_OFFSET32(total_multicast_packets_received_hi),
9697                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9698         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9699                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9700         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9701                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9702         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9703                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9704         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9705                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9706         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9707                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9708 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9709                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9710         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9711                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9712         { STATS_OFFSET32(no_buff_discard_hi),
9713                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9714         { STATS_OFFSET32(mac_filter_discard),
9715                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9716         { STATS_OFFSET32(xxoverflow_discard),
9717                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9718         { STATS_OFFSET32(brb_drop_hi),
9719                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9720         { STATS_OFFSET32(brb_truncate_hi),
9721                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9722         { STATS_OFFSET32(pause_frames_received_hi),
9723                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9724         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9725                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9726         { STATS_OFFSET32(nig_timer_max),
9727                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9728 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9729                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9730         { STATS_OFFSET32(rx_skb_alloc_failed),
9731                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9732         { STATS_OFFSET32(hw_csum_err),
9733                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9734
9735         { STATS_OFFSET32(total_bytes_transmitted_hi),
9736                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9737         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9738                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9739         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9740                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9741         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9742                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9743         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9744                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9745         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9746                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9747         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9748                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9749 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9750                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9751         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9752                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9753         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9754                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9755         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9756                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9757         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9758                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9759         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9760                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9761         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9762                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9763         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9764                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9765         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9766                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9767         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9768                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9769 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9770                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9771         { STATS_OFFSET32(pause_frames_sent_hi),
9772                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9773 };
9774
9775 #define IS_PORT_STAT(i) \
9776         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9777 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9778 #define IS_E1HMF_MODE_STAT(bp) \
9779                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9780
9781 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9782 {
9783         struct bnx2x *bp = netdev_priv(dev);
9784         int i, j, k;
9785
9786         switch (stringset) {
9787         case ETH_SS_STATS:
9788                 if (is_multi(bp)) {
9789                         k = 0;
9790                         for_each_queue(bp, i) {
9791                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9792                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9793                                                 bnx2x_q_stats_arr[j].string, i);
9794                                 k += BNX2X_NUM_Q_STATS;
9795                         }
9796                         if (IS_E1HMF_MODE_STAT(bp))
9797                                 break;
9798                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9799                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9800                                        bnx2x_stats_arr[j].string);
9801                 } else {
9802                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9803                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9804                                         continue;
9805                                 strcpy(buf + j*ETH_GSTRING_LEN,
9806                                        bnx2x_stats_arr[i].string);
9807                                 j++;
9808                         }
9809                 }
9810                 break;
9811
9812         case ETH_SS_TEST:
9813                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9814                 break;
9815         }
9816 }
9817
9818 static int bnx2x_get_stats_count(struct net_device *dev)
9819 {
9820         struct bnx2x *bp = netdev_priv(dev);
9821         int i, num_stats;
9822
9823         if (is_multi(bp)) {
9824                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9825                 if (!IS_E1HMF_MODE_STAT(bp))
9826                         num_stats += BNX2X_NUM_STATS;
9827         } else {
9828                 if (IS_E1HMF_MODE_STAT(bp)) {
9829                         num_stats = 0;
9830                         for (i = 0; i < BNX2X_NUM_STATS; i++)
9831                                 if (IS_FUNC_STAT(i))
9832                                         num_stats++;
9833                 } else
9834                         num_stats = BNX2X_NUM_STATS;
9835         }
9836
9837         return num_stats;
9838 }
9839
9840 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9841                                     struct ethtool_stats *stats, u64 *buf)
9842 {
9843         struct bnx2x *bp = netdev_priv(dev);
9844         u32 *hw_stats, *offset;
9845         int i, j, k;
9846
9847         if (is_multi(bp)) {
9848                 k = 0;
9849                 for_each_queue(bp, i) {
9850                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9851                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9852                                 if (bnx2x_q_stats_arr[j].size == 0) {
9853                                         /* skip this counter */
9854                                         buf[k + j] = 0;
9855                                         continue;
9856                                 }
9857                                 offset = (hw_stats +
9858                                           bnx2x_q_stats_arr[j].offset);
9859                                 if (bnx2x_q_stats_arr[j].size == 4) {
9860                                         /* 4-byte counter */
9861                                         buf[k + j] = (u64) *offset;
9862                                         continue;
9863                                 }
9864                                 /* 8-byte counter */
9865                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9866                         }
9867                         k += BNX2X_NUM_Q_STATS;
9868                 }
9869                 if (IS_E1HMF_MODE_STAT(bp))
9870                         return;
9871                 hw_stats = (u32 *)&bp->eth_stats;
9872                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9873                         if (bnx2x_stats_arr[j].size == 0) {
9874                                 /* skip this counter */
9875                                 buf[k + j] = 0;
9876                                 continue;
9877                         }
9878                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
9879                         if (bnx2x_stats_arr[j].size == 4) {
9880                                 /* 4-byte counter */
9881                                 buf[k + j] = (u64) *offset;
9882                                 continue;
9883                         }
9884                         /* 8-byte counter */
9885                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
9886                 }
9887         } else {
9888                 hw_stats = (u32 *)&bp->eth_stats;
9889                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9890                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9891                                 continue;
9892                         if (bnx2x_stats_arr[i].size == 0) {
9893                                 /* skip this counter */
9894                                 buf[j] = 0;
9895                                 j++;
9896                                 continue;
9897                         }
9898                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
9899                         if (bnx2x_stats_arr[i].size == 4) {
9900                                 /* 4-byte counter */
9901                                 buf[j] = (u64) *offset;
9902                                 j++;
9903                                 continue;
9904                         }
9905                         /* 8-byte counter */
9906                         buf[j] = HILO_U64(*offset, *(offset + 1));
9907                         j++;
9908                 }
9909         }
9910 }
9911
9912 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9913 {
9914         struct bnx2x *bp = netdev_priv(dev);
9915         int port = BP_PORT(bp);
9916         int i;
9917
9918         if (!netif_running(dev))
9919                 return 0;
9920
9921         if (!bp->port.pmf)
9922                 return 0;
9923
9924         if (data == 0)
9925                 data = 2;      /* blink for 2 seconds by default */
9926
9927         for (i = 0; i < (data * 2); i++) {
9928                 if ((i % 2) == 0)
9929                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9930                                       bp->link_params.hw_led_mode,
9931                                       bp->link_params.chip_id);
9932                 else
9933                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9934                                       bp->link_params.hw_led_mode,
9935                                       bp->link_params.chip_id);
9936
9937                 msleep_interruptible(500);
9938                 if (signal_pending(current))
9939                         break;
9940         }
9941
9942         if (bp->link_vars.link_up)
9943                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9944                               bp->link_vars.line_speed,
9945                               bp->link_params.hw_led_mode,
9946                               bp->link_params.chip_id);
9947
9948         return 0;
9949 }
9950
9951 static struct ethtool_ops bnx2x_ethtool_ops = {
9952         .get_settings           = bnx2x_get_settings,
9953         .set_settings           = bnx2x_set_settings,
9954         .get_drvinfo            = bnx2x_get_drvinfo,
9955         .get_wol                = bnx2x_get_wol,
9956         .set_wol                = bnx2x_set_wol,
9957         .get_msglevel           = bnx2x_get_msglevel,
9958         .set_msglevel           = bnx2x_set_msglevel,
9959         .nway_reset             = bnx2x_nway_reset,
9960         .get_link               = ethtool_op_get_link,
9961         .get_eeprom_len         = bnx2x_get_eeprom_len,
9962         .get_eeprom             = bnx2x_get_eeprom,
9963         .set_eeprom             = bnx2x_set_eeprom,
9964         .get_coalesce           = bnx2x_get_coalesce,
9965         .set_coalesce           = bnx2x_set_coalesce,
9966         .get_ringparam          = bnx2x_get_ringparam,
9967         .set_ringparam          = bnx2x_set_ringparam,
9968         .get_pauseparam         = bnx2x_get_pauseparam,
9969         .set_pauseparam         = bnx2x_set_pauseparam,
9970         .get_rx_csum            = bnx2x_get_rx_csum,
9971         .set_rx_csum            = bnx2x_set_rx_csum,
9972         .get_tx_csum            = ethtool_op_get_tx_csum,
9973         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9974         .set_flags              = bnx2x_set_flags,
9975         .get_flags              = ethtool_op_get_flags,
9976         .get_sg                 = ethtool_op_get_sg,
9977         .set_sg                 = ethtool_op_set_sg,
9978         .get_tso                = ethtool_op_get_tso,
9979         .set_tso                = bnx2x_set_tso,
9980         .self_test_count        = bnx2x_self_test_count,
9981         .self_test              = bnx2x_self_test,
9982         .get_strings            = bnx2x_get_strings,
9983         .phys_id                = bnx2x_phys_id,
9984         .get_stats_count        = bnx2x_get_stats_count,
9985         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9986 };
9987
9988 /* end of ethtool_ops */
9989
9990 /****************************************************************************
9991 * General service functions
9992 ****************************************************************************/
9993
9994 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9995 {
9996         u16 pmcsr;
9997
9998         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9999
10000         switch (state) {
10001         case PCI_D0:
10002                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10003                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10004                                        PCI_PM_CTRL_PME_STATUS));
10005
10006                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10007                         /* delay required during transition out of D3hot */
10008                         msleep(20);
10009                 break;
10010
10011         case PCI_D3hot:
10012                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10013                 pmcsr |= 3;             /* D3hot */
10014
10015                 if (bp->wol)
10016                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10017
10018                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10019                                       pmcsr);
10020
10021                 /* No more memory access after this point until
10022                  * the device is brought back to D0.
10023                  */
10024                 break;
10025
10026         default:
10027                 return -EINVAL;
10028         }
10029         return 0;
10030 }
10031
10032 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10033 {
10034         u16 rx_cons_sb;
10035
10036         /* Tell compiler that status block fields can change */
10037         barrier();
10038         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
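        /* the SB index may point at the last entry of an RCQ page, which
         * holds a next-page pointer rather than a completion; step over it
         * before comparing against the driver's consumer
         */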
10039         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10040                 rx_cons_sb++;
10041         return (fp->rx_comp_cons != rx_cons_sb);
10042 }
10043
10044 /*
10045  * net_device service functions
10046  */
10047
10048 static int bnx2x_poll(struct napi_struct *napi, int budget)
10049 {
10050         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10051                                                  napi);
10052         struct bnx2x *bp = fp->bp;
10053         int work_done = 0;
10054
10055 #ifdef BNX2X_STOP_ON_ERROR
10056         if (unlikely(bp->panic))
10057                 goto poll_panic;
10058 #endif
10059
10060         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10061         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10062         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10063
10064         bnx2x_update_fpsb_idx(fp);
10065
10066         if (bnx2x_has_tx_work(fp))
10067                 bnx2x_tx_int(fp, budget);
10068
10069         if (bnx2x_has_rx_work(fp))
10070                 work_done = bnx2x_rx_int(fp, budget);
10071
10072         rmb(); /* BNX2X_HAS_WORK() reads the status block */
10073
10074         /* must not complete if we consumed the full budget */
10075         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
10076
10077 #ifdef BNX2X_STOP_ON_ERROR
10078 poll_panic:
10079 #endif
10080                 napi_complete(napi);
10081
10082                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10083                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10084                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10085                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10086         }
10087
10088         return work_done;
10089 }
10090
10091
10092 /* we split the first BD into a headers BD and a data BD
10093  * to ease the pain of our fellow microcode engineers;
10094  * we use one DMA mapping for both BDs.
10095  * So far this has only been observed to happen
10096  * in Other Operating Systems(TM)
10097  */
10098 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10099                                    struct bnx2x_fastpath *fp,
10100                                    struct eth_tx_bd **tx_bd, u16 hlen,
10101                                    u16 bd_prod, int nbd)
10102 {
10103         struct eth_tx_bd *h_tx_bd = *tx_bd;
10104         struct eth_tx_bd *d_tx_bd;
10105         dma_addr_t mapping;
10106         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10107
10108         /* first fix first BD */
10109         h_tx_bd->nbd = cpu_to_le16(nbd);
10110         h_tx_bd->nbytes = cpu_to_le16(hlen);
10111
10112         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10113            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10114            h_tx_bd->addr_lo, h_tx_bd->nbd);
10115
10116         /* now get a new data BD
10117          * (after the pbd) and fill it */
10118         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10119         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10120
10121         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10122                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10123
10124         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10125         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10126         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10127         d_tx_bd->vlan = 0;
10128         /* this marks the BD as one that has no individual mapping;
10129          * the FW ignores this flag in a BD not marked start
10130          */
10131         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10132         DP(NETIF_MSG_TX_QUEUED,
10133            "TSO split data size is %d (%x:%x)\n",
10134            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10135
10136         /* update tx_bd for marking the last BD flag */
10137         *tx_bd = d_tx_bd;
10138
10139         return bd_prod;
10140 }
10141
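/* Adjust a checksum computed by the stack when the device's checksum
 * start differs from the transport header by 'fix' bytes: fold the
 * extra bytes out of (fix > 0) or back into (fix < 0) the sum, then
 * byte-swap the result for the FW.
 */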
10142 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10143 {
10144         if (fix > 0)
10145                 csum = (u16) ~csum_fold(csum_sub(csum,
10146                                 csum_partial(t_header - fix, fix, 0)));
10147
10148         else if (fix < 0)
10149                 csum = (u16) ~csum_fold(csum_add(csum,
10150                                 csum_partial(t_header, -fix, 0)));
10151
10152         return swab16(csum);
10153 }
10154
10155 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10156 {
10157         u32 rc;
10158
10159         if (skb->ip_summed != CHECKSUM_PARTIAL)
10160                 rc = XMIT_PLAIN;
10161
10162         else {
10163                 if (skb->protocol == htons(ETH_P_IPV6)) {
10164                         rc = XMIT_CSUM_V6;
10165                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10166                                 rc |= XMIT_CSUM_TCP;
10167
10168                 } else {
10169                         rc = XMIT_CSUM_V4;
10170                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10171                                 rc |= XMIT_CSUM_TCP;
10172                 }
10173         }
10174
10175         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10176                 rc |= XMIT_GSO_V4;
10177
10178         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10179                 rc |= XMIT_GSO_V6;
10180
10181         return rc;
10182 }
10183
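/* FW restriction being checked below: for an LSO packet, every window of
 * (MAX_FETCH_BD - 3) consecutive data BDs (e.g. a 10-BD window when
 * MAX_FETCH_BD is 13) must carry at least one MSS of payload, so that no
 * single segment spans more BDs than the FW can fetch.
 * bnx2x_pkt_req_lin() slides that window across the frag list and
 * requests linearization on the first violation.
 */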
10184 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10185 /* check if the packet requires linearization (i.e. it is too fragmented);
10186    no need to check fragmentation if the page size > 8K (there will be no
10187    violation of the FW restrictions) */
10188 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10189                              u32 xmit_type)
10190 {
10191         int to_copy = 0;
10192         int hlen = 0;
10193         int first_bd_sz = 0;
10194
10195         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10196         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10197
10198                 if (xmit_type & XMIT_GSO) {
10199                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10200                         /* Check if LSO packet needs to be copied:
10201                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10202                         int wnd_size = MAX_FETCH_BD - 3;
10203                         /* Number of windows to check */
10204                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10205                         int wnd_idx = 0;
10206                         int frag_idx = 0;
10207                         u32 wnd_sum = 0;
10208
10209                         /* Headers length */
10210                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10211                                 tcp_hdrlen(skb);
10212
10213                         /* Amount of data (w/o headers) on linear part of SKB*/
10214                         first_bd_sz = skb_headlen(skb) - hlen;
10215
10216                         wnd_sum  = first_bd_sz;
10217
10218                         /* Calculate the first sum - it's special */
10219                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10220                                 wnd_sum +=
10221                                         skb_shinfo(skb)->frags[frag_idx].size;
10222
10223                         /* If there was data on linear skb data - check it */
10224                         if (first_bd_sz > 0) {
10225                                 if (unlikely(wnd_sum < lso_mss)) {
10226                                         to_copy = 1;
10227                                         goto exit_lbl;
10228                                 }
10229
10230                                 wnd_sum -= first_bd_sz;
10231                         }
10232
10233                         /* Others are easier: run through the frag list and
10234                            check all windows */
10235                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10236                                 wnd_sum +=
10237                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10238
10239                                 if (unlikely(wnd_sum < lso_mss)) {
10240                                         to_copy = 1;
10241                                         break;
10242                                 }
10243                                 wnd_sum -=
10244                                         skb_shinfo(skb)->frags[wnd_idx].size;
10245                         }
10246                 } else {
10247                         /* a non-LSO packet that is too fragmented
10248                            must always be linearized */
10249                         to_copy = 1;
10250                 }
10251         }
10252
10253 exit_lbl:
10254         if (unlikely(to_copy))
10255                 DP(NETIF_MSG_TX_QUEUED,
10256                    "Linearization IS REQUIRED for %s packet. "
10257                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10258                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10259                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10260
10261         return to_copy;
10262 }
10263 #endif
10264
10265 /* called with netif_tx_lock
10266  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10267  * netif_wake_queue()
10268  */
10269 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10270 {
10271         struct bnx2x *bp = netdev_priv(dev);
10272         struct bnx2x_fastpath *fp;
10273         struct netdev_queue *txq;
10274         struct sw_tx_bd *tx_buf;
10275         struct eth_tx_bd *tx_bd;
10276         struct eth_tx_parse_bd *pbd = NULL;
10277         u16 pkt_prod, bd_prod;
10278         int nbd, fp_index;
10279         dma_addr_t mapping;
10280         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10281         int vlan_off = (bp->e1hov ? 4 : 0);
10282         int i;
10283         u8 hlen = 0;
10284
10285 #ifdef BNX2X_STOP_ON_ERROR
10286         if (unlikely(bp->panic))
10287                 return NETDEV_TX_BUSY;
10288 #endif
10289
10290         fp_index = skb_get_queue_mapping(skb);
10291         txq = netdev_get_tx_queue(dev, fp_index);
10292
10293         fp = &bp->fp[fp_index];
10294
10295         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10296                 fp->eth_q_stats.driver_xoff++;
10297                 netif_tx_stop_queue(txq);
10298                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10299                 return NETDEV_TX_BUSY;
10300         }
10301
10302         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10303            "  gso type %x  xmit_type %x\n",
10304            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10305            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10306
10307 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10308         /* First, check if we need to linearize the skb (due to FW
10309            restrictions). No need to check fragmentation if the page size > 8K
10310            (there will be no violation of the FW restrictions) */
10311         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10312                 /* Statistics of linearization */
10313                 bp->lin_cnt++;
10314                 if (skb_linearize(skb) != 0) {
10315                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10316                            "silently dropping this SKB\n");
10317                         dev_kfree_skb_any(skb);
10318                         return NETDEV_TX_OK;
10319                 }
10320         }
10321 #endif
10322
10323         /*
10324         Please read carefully. First we use one BD which we mark as start,
10325         then for TSO or xsum we have a parsing info BD,
10326         and only then we have the rest of the TSO BDs.
10327         (don't forget to mark the last one as last,
10328         and to unmap only AFTER you write to the BD ...)
10329         And above all, all pbd sizes are in words - NOT DWORDS!
10330         */
10331
10332         pkt_prod = fp->tx_pkt_prod++;
10333         bd_prod = TX_BD(fp->tx_bd_prod);
10334
10335         /* get a tx_buf and first BD */
10336         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10337         tx_bd = &fp->tx_desc_ring[bd_prod];
10338
10339         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10340         tx_bd->general_data = (UNICAST_ADDRESS <<
10341                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10342         /* header nbd */
10343         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10344
10345         /* remember the first BD of the packet */
10346         tx_buf->first_bd = fp->tx_bd_prod;
10347         tx_buf->skb = skb;
10348
10349         DP(NETIF_MSG_TX_QUEUED,
10350            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10351            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10352
10353 #ifdef BCM_VLAN
10354         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10355             (bp->flags & HW_VLAN_TX_FLAG)) {
10356                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10357                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10358                 vlan_off += 4;
10359         } else
10360 #endif
10361                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10362
10363         if (xmit_type) {
10364                 /* turn on parsing and get a BD */
10365                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10366                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10367
10368                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10369         }
10370
10371         if (xmit_type & XMIT_CSUM) {
10372                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10373
10374                 /* for now NS flag is not used in Linux */
10375                 pbd->global_data =
10376                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10377                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10378
10379                 pbd->ip_hlen = (skb_transport_header(skb) -
10380                                 skb_network_header(skb)) / 2;
10381
10382                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10383
10384                 pbd->total_hlen = cpu_to_le16(hlen);
10385                 hlen = hlen*2 - vlan_off;
10386
10387                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10388
10389                 if (xmit_type & XMIT_CSUM_V4)
10390                         tx_bd->bd_flags.as_bitfield |=
10391                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10392                 else
10393                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10394
10395                 if (xmit_type & XMIT_CSUM_TCP) {
10396                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10397
10398                 } else {
10399                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10400
10401                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10402                         pbd->cs_offset = fix / 2;
10403
10404                         DP(NETIF_MSG_TX_QUEUED,
10405                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10406                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10407                            SKB_CS(skb));
10408
10409                         /* HW bug: fixup the CSUM */
10410                         pbd->tcp_pseudo_csum =
10411                                 bnx2x_csum_fix(skb_transport_header(skb),
10412                                                SKB_CS(skb), fix);
10413
10414                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10415                            pbd->tcp_pseudo_csum);
10416                 }
10417         }
10418
10419         mapping = pci_map_single(bp->pdev, skb->data,
10420                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10421
10422         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10423         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10424         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10425         tx_bd->nbd = cpu_to_le16(nbd);
10426         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10427
10428         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10429            "  nbytes %d  flags %x  vlan %x\n",
10430            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10431            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10432            le16_to_cpu(tx_bd->vlan));
10433
10434         if (xmit_type & XMIT_GSO) {
10435
10436                 DP(NETIF_MSG_TX_QUEUED,
10437                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10438                    skb->len, hlen, skb_headlen(skb),
10439                    skb_shinfo(skb)->gso_size);
10440
10441                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10442
10443                 if (unlikely(skb_headlen(skb) > hlen))
10444                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10445                                                  bd_prod, ++nbd);
10446
10447                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10448                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10449                 pbd->tcp_flags = pbd_tcp_flags(skb);
10450
10451                 if (xmit_type & XMIT_GSO_V4) {
10452                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10453                         pbd->tcp_pseudo_csum =
10454                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10455                                                           ip_hdr(skb)->daddr,
10456                                                           0, IPPROTO_TCP, 0));
10457
10458                 } else
10459                         pbd->tcp_pseudo_csum =
10460                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10461                                                         &ipv6_hdr(skb)->daddr,
10462                                                         0, IPPROTO_TCP, 0));
10463
10464                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10465         }
10466
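        /* map and chain one data BD for each page fragment of the skb */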
10467         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10468                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10469
10470                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10471                 tx_bd = &fp->tx_desc_ring[bd_prod];
10472
10473                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10474                                        frag->size, PCI_DMA_TODEVICE);
10475
10476                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10477                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10478                 tx_bd->nbytes = cpu_to_le16(frag->size);
10479                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10480                 tx_bd->bd_flags.as_bitfield = 0;
10481
10482                 DP(NETIF_MSG_TX_QUEUED,
10483                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10484                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10485                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10486         }
10487
10488         /* now at last mark the BD as the last BD */
10489         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10490
10491         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10492            tx_bd, tx_bd->bd_flags.as_bitfield);
10493
10494         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10495
10496         /* now send a tx doorbell, counting the next-page BD
10497          * if the BD chain crosses or ends on a page boundary
10498          */
10499         if (TX_BD_POFF(bd_prod) < nbd)
10500                 nbd++;
10501
10502         if (pbd)
10503                 DP(NETIF_MSG_TX_QUEUED,
10504                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10505                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10506                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10507                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10508                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10509
10510         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10511
10512         /*
10513          * Make sure that the BD data is updated before updating the producer
10514          * since FW might read the BD right after the producer is updated.
10515          * This is only applicable for weak-ordered memory model archs such
10516          * as IA-64. The following barrier is also mandatory since the FW
10517          * assumes packets must have BDs.
10518          */
10519         wmb();
10520
10521         le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10522         mb(); /* FW restriction: must not reorder writing nbd and packets */
10523         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10524         DOORBELL(bp, fp->index, 0);
10525
10526         mmiowb();
10527
10528         fp->tx_bd_prod += nbd;
10529         dev->trans_start = jiffies;
10530
10531         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10532                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10533                    if we put Tx into XOFF state. */
10534                 smp_mb();
10535                 netif_tx_stop_queue(txq);
10536                 fp->eth_q_stats.driver_xoff++;
10537                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10538                         netif_tx_wake_queue(txq);
10539         }
10540         fp->tx_pkt++;
10541
10542         return NETDEV_TX_OK;
10543 }
10544
10545 /* called with rtnl_lock */
10546 static int bnx2x_open(struct net_device *dev)
10547 {
10548         struct bnx2x *bp = netdev_priv(dev);
10549
10550         netif_carrier_off(dev);
10551
10552         bnx2x_set_power_state(bp, PCI_D0);
10553
10554         return bnx2x_nic_load(bp, LOAD_OPEN);
10555 }
10556
10557 /* called with rtnl_lock */
10558 static int bnx2x_close(struct net_device *dev)
10559 {
10560         struct bnx2x *bp = netdev_priv(dev);
10561
10562         /* Unload the driver, release IRQs */
10563         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10564         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10565                 if (!CHIP_REV_IS_SLOW(bp))
10566                         bnx2x_set_power_state(bp, PCI_D3hot);
10567
10568         return 0;
10569 }
10570
10571 /* called with netif_tx_lock from dev_mcast.c */
10572 static void bnx2x_set_rx_mode(struct net_device *dev)
10573 {
10574         struct bnx2x *bp = netdev_priv(dev);
10575         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10576         int port = BP_PORT(bp);
10577
10578         if (bp->state != BNX2X_STATE_OPEN) {
10579                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10580                 return;
10581         }
10582
10583         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10584
10585         if (dev->flags & IFF_PROMISC)
10586                 rx_mode = BNX2X_RX_MODE_PROMISC;
10587
10588         else if ((dev->flags & IFF_ALLMULTI) ||
10589                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10590                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10591
10592         else { /* some multicasts */
10593                 if (CHIP_IS_E1(bp)) {
10594                         int i, old, offset;
10595                         struct dev_mc_list *mclist;
10596                         struct mac_configuration_cmd *config =
10597                                                 bnx2x_sp(bp, mcast_config);
10598
10599                         for (i = 0, mclist = dev->mc_list;
10600                              mclist && (i < dev->mc_count);
10601                              i++, mclist = mclist->next) {
10602
10603                                 config->config_table[i].
10604                                         cam_entry.msb_mac_addr =
10605                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10606                                 config->config_table[i].
10607                                         cam_entry.middle_mac_addr =
10608                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10609                                 config->config_table[i].
10610                                         cam_entry.lsb_mac_addr =
10611                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10612                                 config->config_table[i].cam_entry.flags =
10613                                                         cpu_to_le16(port);
10614                                 config->config_table[i].
10615                                         target_table_entry.flags = 0;
10616                                 config->config_table[i].
10617                                         target_table_entry.client_id = 0;
10618                                 config->config_table[i].
10619                                         target_table_entry.vlan_id = 0;
10620
10621                                 DP(NETIF_MSG_IFUP,
10622                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10623                                    config->config_table[i].
10624                                                 cam_entry.msb_mac_addr,
10625                                    config->config_table[i].
10626                                                 cam_entry.middle_mac_addr,
10627                                    config->config_table[i].
10628                                                 cam_entry.lsb_mac_addr);
10629                         }
10630                         old = config->hdr.length;
10631                         if (old > i) {
10632                                 for (; i < old; i++) {
10633                                         if (CAM_IS_INVALID(config->
10634                                                            config_table[i])) {
10635                                                 /* already invalidated */
10636                                                 break;
10637                                         }
10638                                         /* invalidate */
10639                                         CAM_INVALIDATE(config->
10640                                                        config_table[i]);
10641                                 }
10642                         }
10643
10644                         if (CHIP_REV_IS_SLOW(bp))
10645                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10646                         else
10647                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10648
10649                         config->hdr.length = i;
10650                         config->hdr.offset = offset;
10651                         config->hdr.client_id = bp->fp->cl_id;
10652                         config->hdr.reserved1 = 0;
10653
10654                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10655                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10656                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10657                                       0);
10658                 } else { /* E1H */
10659                         /* Accept one or more multicasts */
10660                         struct dev_mc_list *mclist;
10661                         u32 mc_filter[MC_HASH_SIZE];
10662                         u32 crc, bit, regidx;
10663                         int i;
10664
10665                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10666
10667                         for (i = 0, mclist = dev->mc_list;
10668                              mclist && (i < dev->mc_count);
10669                              i++, mclist = mclist->next) {
10670
10671                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10672                                    mclist->dmi_addr);
10673
10674                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10675                                 bit = (crc >> 24) & 0xff;
10676                                 regidx = bit >> 5;
10677                                 bit &= 0x1f;
10678                                 mc_filter[regidx] |= (1 << bit);
10679                         }
10680
10681                         for (i = 0; i < MC_HASH_SIZE; i++)
10682                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10683                                        mc_filter[i]);
10684                 }
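/* Note on the E1H multicast hash above: the top byte of the CRC32c of the
 * MAC address selects one of 256 filter bits, spread across MC_HASH_SIZE
 * (8) 32-bit registers.  Worked example with a hypothetical CRC of
 * 0x4A000000: bit = (crc >> 24) & 0xff = 74, so regidx = 74 >> 5 = 2 and
 * the register bit is 74 & 0x1f = 10, i.e. mc_filter[2] |= (1 << 10).
 * The filter is approximate: hash collisions can let extra multicasts
 * through, and the stack discards those.
 */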
10685         }
10686
10687         bp->rx_mode = rx_mode;
10688         bnx2x_set_storm_rx_mode(bp);
10689 }
10690
10691 /* called with rtnl_lock */
10692 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10693 {
10694         struct sockaddr *addr = p;
10695         struct bnx2x *bp = netdev_priv(dev);
10696
10697         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10698                 return -EINVAL;
10699
10700         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10701         if (netif_running(dev)) {
10702                 if (CHIP_IS_E1(bp))
10703                         bnx2x_set_mac_addr_e1(bp, 1);
10704                 else
10705                         bnx2x_set_mac_addr_e1h(bp, 1);
10706         }
10707
10708         return 0;
10709 }
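/* Illustration (not part of this driver): bnx2x_change_mac_addr() is
 * reached through the generic SIOCSIFHWADDR ioctl ("ip link set dev ethX
 * address ..." lands in the same ndo via netlink).  A minimal userspace
 * sketch, assuming a hypothetical interface "eth0", a valid unicast MAC
 * and an AF_INET datagram socket fd:
 *
 *	struct ifreq ifr;
 *	unsigned char mac[6] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0x01 };
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;   (must match dev->type)
 *	memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);
 *	if (ioctl(fd, SIOCSIFHWADDR, &ifr) < 0)
 *		perror("SIOCSIFHWADDR");
 */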
10710
10711 /* called with rtnl_lock */
10712 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10713 {
10714         struct mii_ioctl_data *data = if_mii(ifr);
10715         struct bnx2x *bp = netdev_priv(dev);
10716         int port = BP_PORT(bp);
10717         int err;
10718
10719         switch (cmd) {
10720         case SIOCGMIIPHY:
10721                 data->phy_id = bp->port.phy_addr;
10722
10723                 /* fallthrough */
10724
10725         case SIOCGMIIREG: {
10726                 u16 mii_regval;
10727
10728                 if (!netif_running(dev))
10729                         return -EAGAIN;
10730
10731                 mutex_lock(&bp->port.phy_mutex);
10732                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10733                                       DEFAULT_PHY_DEV_ADDR,
10734                                       (data->reg_num & 0x1f), &mii_regval);
10735                 data->val_out = mii_regval;
10736                 mutex_unlock(&bp->port.phy_mutex);
10737                 return err;
10738         }
10739
10740         case SIOCSMIIREG:
10741                 if (!capable(CAP_NET_ADMIN))
10742                         return -EPERM;
10743
10744                 if (!netif_running(dev))
10745                         return -EAGAIN;
10746
10747                 mutex_lock(&bp->port.phy_mutex);
10748                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10749                                        DEFAULT_PHY_DEV_ADDR,
10750                                        (data->reg_num & 0x1f), data->val_in);
10751                 mutex_unlock(&bp->port.phy_mutex);
10752                 return err;
10753
10754         default:
10755                 /* do nothing */
10756                 break;
10757         }
10758
10759         return -EOPNOTSUPP;
10760 }
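/* Illustration (not part of this driver): a sketch of how mii-tool-like
 * userspace would exercise the MII ioctls handled above.  Assumes a
 * hypothetical "eth0" and an AF_INET socket fd; userspace has no
 * if_mii(), so it overlays mii_ioctl_data on the ifreq union the same
 * way the kernel does.  SIOCGMIIPHY fills mii->phy_id with the PHY
 * address; SIOCGMIIREG returns the register value in mii->val_out:
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
 *		mii->reg_num = MII_BMSR;
 *		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
 *			printf("BMSR: 0x%04x\n", mii->val_out);
 *	}
 */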
10761
10762 /* called with rtnl_lock */
10763 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10764 {
10765         struct bnx2x *bp = netdev_priv(dev);
10766         int rc = 0;
10767
10768         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10769             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10770                 return -EINVAL;
10771
10772         /* This does not race with packet allocation:
10773          * the actual allocation size is only
10774          * updated as part of nic load
10775          */
10776         dev->mtu = new_mtu;
10777
10778         if (netif_running(dev)) {
10779                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10780                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10781         }
10782
10783         return rc;
10784 }
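/* Note: the MTU bounds above are driver-specific (derived from
 * ETH_MIN_PACKET_SIZE and ETH_MAX_JUMBO_PACKET_SIZE in bnx2x.h) rather
 * than the generic eth_change_mtu() limits.  Changing the MTU on a
 * running interface is expensive: it triggers a full unload/load cycle,
 * because the RX buffer size is only computed at load time.
 */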
10785
10786 static void bnx2x_tx_timeout(struct net_device *dev)
10787 {
10788         struct bnx2x *bp = netdev_priv(dev);
10789
10790 #ifdef BNX2X_STOP_ON_ERROR
10791         if (!bp->panic)
10792                 bnx2x_panic();
10793 #endif
10794         /* This allows the netif to be shut down gracefully before resetting */
10795         schedule_work(&bp->reset_task);
10796 }
10797
10798 #ifdef BCM_VLAN
10799 /* called with rtnl_lock */
10800 static void bnx2x_vlan_rx_register(struct net_device *dev,
10801                                    struct vlan_group *vlgrp)
10802 {
10803         struct bnx2x *bp = netdev_priv(dev);
10804
10805         bp->vlgrp = vlgrp;
10806
10807         /* Set flags according to the required capabilities */
10808         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10809
10810         if (dev->features & NETIF_F_HW_VLAN_TX)
10811                 bp->flags |= HW_VLAN_TX_FLAG;
10812
10813         if (dev->features & NETIF_F_HW_VLAN_RX)
10814                 bp->flags |= HW_VLAN_RX_FLAG;
10815
10816         if (netif_running(dev))
10817                 bnx2x_set_client_config(bp);
10818 }
10819
10820 #endif
10821
10822 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10823 static void poll_bnx2x(struct net_device *dev)
10824 {
10825         struct bnx2x *bp = netdev_priv(dev);
10826
10827         disable_irq(bp->pdev->irq);
10828         bnx2x_interrupt(bp->pdev->irq, dev);
10829         enable_irq(bp->pdev->irq);
10830 }
10831 #endif
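/* Note: poll_bnx2x() backs .ndo_poll_controller and is only used by
 * netpoll clients such as netconsole, which must send and receive with
 * interrupts disabled.  A typical trigger, with hypothetical addresses:
 *
 *	modprobe netconsole netconsole=@/eth0,6666@192.168.0.2/
 *
 * The handler simply masks the device IRQ and runs the normal interrupt
 * routine synchronously.
 */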
10832
10833 static const struct net_device_ops bnx2x_netdev_ops = {
10834         .ndo_open               = bnx2x_open,
10835         .ndo_stop               = bnx2x_close,
10836         .ndo_start_xmit         = bnx2x_start_xmit,
10837         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10838         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10839         .ndo_validate_addr      = eth_validate_addr,
10840         .ndo_do_ioctl           = bnx2x_ioctl,
10841         .ndo_change_mtu         = bnx2x_change_mtu,
10842         .ndo_tx_timeout         = bnx2x_tx_timeout,
10843 #ifdef BCM_VLAN
10844         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10845 #endif
10846 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10847         .ndo_poll_controller    = poll_bnx2x,
10848 #endif
10849 };
10850
10851 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10852                                     struct net_device *dev)
10853 {
10854         struct bnx2x *bp;
10855         int rc;
10856
10857         SET_NETDEV_DEV(dev, &pdev->dev);
10858         bp = netdev_priv(dev);
10859
10860         bp->dev = dev;
10861         bp->pdev = pdev;
10862         bp->flags = 0;
10863         bp->func = PCI_FUNC(pdev->devfn);
10864
10865         rc = pci_enable_device(pdev);
10866         if (rc) {
10867                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10868                 goto err_out;
10869         }
10870
10871         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10872                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10873                        " aborting\n");
10874                 rc = -ENODEV;
10875                 goto err_out_disable;
10876         }
10877
10878         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10879                 printk(KERN_ERR PFX "Cannot find second PCI device"
10880                        " base address, aborting\n");
10881                 rc = -ENODEV;
10882                 goto err_out_disable;
10883         }
10884
10885         if (atomic_read(&pdev->enable_cnt) == 1) {
10886                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10887                 if (rc) {
10888                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10889                                " aborting\n");
10890                         goto err_out_disable;
10891                 }
10892
10893                 pci_set_master(pdev);
10894                 pci_save_state(pdev);
10895         }
10896
10897         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10898         if (bp->pm_cap == 0) {
10899                 printk(KERN_ERR PFX "Cannot find power management"
10900                        " capability, aborting\n");
10901                 rc = -EIO;
10902                 goto err_out_release;
10903         }
10904
10905         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10906         if (bp->pcie_cap == 0) {
10907                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10908                        " aborting\n");
10909                 rc = -EIO;
10910                 goto err_out_release;
10911         }
10912
10913         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10914                 bp->flags |= USING_DAC_FLAG;
10915                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10916                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10917                                " failed, aborting\n");
10918                         rc = -EIO;
10919                         goto err_out_release;
10920                 }
10921
10922         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10923                 printk(KERN_ERR PFX "System does not support DMA,"
10924                        " aborting\n");
10925                 rc = -EIO;
10926                 goto err_out_release;
10927         }
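	/* DMA mask negotiation, in order of preference: if the platform
	 * can address 64 bits, the streaming and consistent masks are
	 * widened together and USING_DAC_FLAG is recorded (it later turns
	 * on NETIF_F_HIGHDMA, letting the stack hand us high-memory skbs
	 * without bounce buffers); otherwise fall back to 32-bit DMA or
	 * fail the probe.
	 */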
10928
10929         dev->mem_start = pci_resource_start(pdev, 0);
10930         dev->base_addr = dev->mem_start;
10931         dev->mem_end = pci_resource_end(pdev, 0);
10932
10933         dev->irq = pdev->irq;
10934
10935         bp->regview = pci_ioremap_bar(pdev, 0);
10936         if (!bp->regview) {
10937                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10938                 rc = -ENOMEM;
10939                 goto err_out_release;
10940         }
10941
10942         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10943                                         min_t(u64, BNX2X_DB_SIZE,
10944                                               pci_resource_len(pdev, 2)));
10945         if (!bp->doorbells) {
10946                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10947                 rc = -ENOMEM;
10948                 goto err_out_unmap;
10949         }
10950
10951         bnx2x_set_power_state(bp, PCI_D0);
10952
10953         /* clean indirect addresses */
10954         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10955                                PCICFG_VENDOR_ID_OFFSET);
10956         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10957         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10958         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10959         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10960
10961         dev->watchdog_timeo = TX_TIMEOUT;
10962
10963         dev->netdev_ops = &bnx2x_netdev_ops;
10964         dev->ethtool_ops = &bnx2x_ethtool_ops;
10965         dev->features |= NETIF_F_SG;
10966         dev->features |= NETIF_F_HW_CSUM;
10967         if (bp->flags & USING_DAC_FLAG)
10968                 dev->features |= NETIF_F_HIGHDMA;
10969 #ifdef BCM_VLAN
10970         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10971         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10972 #endif
10973         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10974         dev->features |= NETIF_F_TSO6;
10975
10976         return 0;
10977
10978 err_out_unmap:
10979         if (bp->regview) {
10980                 iounmap(bp->regview);
10981                 bp->regview = NULL;
10982         }
10983         if (bp->doorbells) {
10984                 iounmap(bp->doorbells);
10985                 bp->doorbells = NULL;
10986         }
10987
10988 err_out_release:
10989         if (atomic_read(&pdev->enable_cnt) == 1)
10990                 pci_release_regions(pdev);
10991
10992 err_out_disable:
10993         pci_disable_device(pdev);
10994         pci_set_drvdata(pdev, NULL);
10995
10996 err_out:
10997         return rc;
10998 }
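/* The err_out_* ladder above is the usual kernel unwind idiom: each label
 * undoes exactly the steps that succeeded before the failure, in reverse
 * order of acquisition, so every failure path releases only what it
 * actually holds.  The atomic_read(&pdev->enable_cnt) == 1 checks mirror
 * the acquisition side: PCI regions are requested, and therefore
 * released, only by the first function of the device to enable it.
 */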
10999
11000 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11001 {
11002         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11003
11004         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11005         return val;
11006 }
11007
11008 /* return value of 1=2.5GT/s ("2.5GHz") 2=5GT/s ("5GHz") */
11009 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11010 {
11011         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11012
11013         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11014         return val;
11015 }
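/* Both helpers above read the same PCIe Link Control/Status dword through
 * the device's PCICFG_OFFSET window and extract one field each: the
 * negotiated link width for bnx2x_get_pcie_width() and the link speed
 * code for bnx2x_get_pcie_speed().  They only feed the human-readable
 * probe banner printed in bnx2x_init_one() below.
 */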
11016
11017 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11018                                     const struct pci_device_id *ent)
11019 {
11020         static int version_printed;
11021         struct net_device *dev = NULL;
11022         struct bnx2x *bp;
11023         int rc;
11024
11025         if (version_printed++ == 0)
11026                 printk(KERN_INFO "%s", version);
11027
11028         /* dev and its private area are zeroed in alloc_etherdev_mq */
11029         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11030         if (!dev) {
11031                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11032                 return -ENOMEM;
11033         }
11034
11035         bp = netdev_priv(dev);
11036         bp->msglevel = debug;
11037
11038         rc = bnx2x_init_dev(pdev, dev);
11039         if (rc < 0) {
11040                 free_netdev(dev);
11041                 return rc;
11042         }
11043
11044         pci_set_drvdata(pdev, dev);
11045
11046         rc = bnx2x_init_bp(bp);
11047         if (rc)
11048                 goto init_one_exit;
11049
11050         rc = register_netdev(dev);
11051         if (rc) {
11052                 dev_err(&pdev->dev, "Cannot register net device\n");
11053                 goto init_one_exit;
11054         }
11055
11056         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11057                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11058                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11059                bnx2x_get_pcie_width(bp),
11060                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11061                dev->base_addr, bp->pdev->irq);
11062         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11063         return 0;
11064
11065 init_one_exit:
11066         if (bp->regview)
11067                 iounmap(bp->regview);
11068
11069         if (bp->doorbells)
11070                 iounmap(bp->doorbells);
11071
11072         free_netdev(dev);
11073
11074         if (atomic_read(&pdev->enable_cnt) == 1)
11075                 pci_release_regions(pdev);
11076
11077         pci_disable_device(pdev);
11078         pci_set_drvdata(pdev, NULL);
11079
11080         return rc;
11081 }
11082
11083 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11084 {
11085         struct net_device *dev = pci_get_drvdata(pdev);
11086         struct bnx2x *bp;
11087
11088         if (!dev) {
11089                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11090                 return;
11091         }
11092         bp = netdev_priv(dev);
11093
11094         unregister_netdev(dev);
11095
11096         if (bp->regview)
11097                 iounmap(bp->regview);
11098
11099         if (bp->doorbells)
11100                 iounmap(bp->doorbells);
11101
11102         free_netdev(dev);
11103
11104         if (atomic_read(&pdev->enable_cnt) == 1)
11105                 pci_release_regions(pdev);
11106
11107         pci_disable_device(pdev);
11108         pci_set_drvdata(pdev, NULL);
11109 }
11110
11111 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11112 {
11113         struct net_device *dev = pci_get_drvdata(pdev);
11114         struct bnx2x *bp;
11115
11116         if (!dev) {
11117                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11118                 return -ENODEV;
11119         }
11120         bp = netdev_priv(dev);
11121
11122         rtnl_lock();
11123
11124         pci_save_state(pdev);
11125
11126         if (!netif_running(dev)) {
11127                 rtnl_unlock();
11128                 return 0;
11129         }
11130
11131         netif_device_detach(dev);
11132
11133         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11134
11135         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11136
11137         rtnl_unlock();
11138
11139         return 0;
11140 }
11141
11142 static int bnx2x_resume(struct pci_dev *pdev)
11143 {
11144         struct net_device *dev = pci_get_drvdata(pdev);
11145         struct bnx2x *bp;
11146         int rc;
11147
11148         if (!dev) {
11149                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11150                 return -ENODEV;
11151         }
11152         bp = netdev_priv(dev);
11153
11154         rtnl_lock();
11155
11156         pci_restore_state(pdev);
11157
11158         if (!netif_running(dev)) {
11159                 rtnl_unlock();
11160                 return 0;
11161         }
11162
11163         bnx2x_set_power_state(bp, PCI_D0);
11164         netif_device_attach(dev);
11165
11166         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11167
11168         rtnl_unlock();
11169
11170         return rc;
11171 }
11172
11173 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11174 {
11175         int i;
11176
11177         bp->state = BNX2X_STATE_ERROR;
11178
11179         bp->rx_mode = BNX2X_RX_MODE_NONE;
11180
11181         bnx2x_netif_stop(bp, 0);
11182
11183         del_timer_sync(&bp->timer);
11184         bp->stats_state = STATS_STATE_DISABLED;
11185         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11186
11187         /* Release IRQs */
11188         bnx2x_free_irq(bp);
11189
11190         if (CHIP_IS_E1(bp)) {
11191                 struct mac_configuration_cmd *config =
11192                                                 bnx2x_sp(bp, mcast_config);
11193
11194                 for (i = 0; i < config->hdr.length; i++)
11195                         CAM_INVALIDATE(config->config_table[i]);
11196         }
11197
11198         /* Free SKBs, SGEs, TPA pool and driver internals */
11199         bnx2x_free_skbs(bp);
11200         for_each_rx_queue(bp, i)
11201                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11202         for_each_rx_queue(bp, i)
11203                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11204         bnx2x_free_mem(bp);
11205
11206         bp->state = BNX2X_STATE_CLOSED;
11207
11208         netif_carrier_off(bp->dev);
11209
11210         return 0;
11211 }
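/* Unlike bnx2x_nic_unload(), this EEH variant deliberately avoids all
 * hardware access: after a fatal PCI error the device may already be
 * isolated from the bus, so no ramrods are posted and the CAM entries are
 * only invalidated in the driver's shadow copy.  What remains is pure
 * software teardown: timer, IRQs, skbs, SGEs, NAPI contexts and memory.
 */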
11212
11213 static void bnx2x_eeh_recover(struct bnx2x *bp)
11214 {
11215         u32 val;
11216
11217         mutex_init(&bp->port.phy_mutex);
11218
11219         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11220         bp->link_params.shmem_base = bp->common.shmem_base;
11221         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11222
11223         if (!bp->common.shmem_base ||
11224             (bp->common.shmem_base < 0xA0000) ||
11225             (bp->common.shmem_base >= 0xC0000)) {
11226                 BNX2X_DEV_INFO("MCP not active\n");
11227                 bp->flags |= NO_MCP_FLAG;
11228                 return;
11229         }
11230
11231         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11232         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11233                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11234                 BNX2X_ERR("BAD MCP validity signature\n");
11235
11236         if (!BP_NOMCP(bp)) {
11237                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11238                               & DRV_MSG_SEQ_NUMBER_MASK);
11239                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11240         }
11241 }
11242
11243 /**
11244  * bnx2x_io_error_detected - called when PCI error is detected
11245  * @pdev: Pointer to PCI device
11246  * @state: The current PCI connection state
11247  *
11248  * This function is called after a PCI bus error affecting
11249  * this device has been detected.
11250  */
11251 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11252                                                 pci_channel_state_t state)
11253 {
11254         struct net_device *dev = pci_get_drvdata(pdev);
11255         struct bnx2x *bp = netdev_priv(dev);
11256
11257         rtnl_lock();
11258
11259         netif_device_detach(dev);
11260
11261         if (netif_running(dev))
11262                 bnx2x_eeh_nic_unload(bp);
11263
11264         pci_disable_device(pdev);
11265
11266         rtnl_unlock();
11267
11268         /* Request a slot reset */
11269         return PCI_ERS_RESULT_NEED_RESET;
11270 }
11271
11272 /**
11273  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11274  * @pdev: Pointer to PCI device
11275  *
11276  * Restart the card from scratch, as if from a cold-boot.
11277  */
11278 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11279 {
11280         struct net_device *dev = pci_get_drvdata(pdev);
11281         struct bnx2x *bp = netdev_priv(dev);
11282
11283         rtnl_lock();
11284
11285         if (pci_enable_device(pdev)) {
11286                 dev_err(&pdev->dev,
11287                         "Cannot re-enable PCI device after reset\n");
11288                 rtnl_unlock();
11289                 return PCI_ERS_RESULT_DISCONNECT;
11290         }
11291
11292         pci_set_master(pdev);
11293         pci_restore_state(pdev);
11294
11295         if (netif_running(dev))
11296                 bnx2x_set_power_state(bp, PCI_D0);
11297
11298         rtnl_unlock();
11299
11300         return PCI_ERS_RESULT_RECOVERED;
11301 }
11302
11303 /**
11304  * bnx2x_io_resume - called when traffic can start flowing again
11305  * @pdev: Pointer to PCI device
11306  *
11307  * This callback is called when the error recovery driver tells us that
11308  * it's OK to resume normal operation.
11309  */
11310 static void bnx2x_io_resume(struct pci_dev *pdev)
11311 {
11312         struct net_device *dev = pci_get_drvdata(pdev);
11313         struct bnx2x *bp = netdev_priv(dev);
11314
11315         rtnl_lock();
11316
11317         bnx2x_eeh_recover(bp);
11318
11319         if (netif_running(dev))
11320                 bnx2x_nic_load(bp, LOAD_NORMAL);
11321
11322         netif_device_attach(dev);
11323
11324         rtnl_unlock();
11325 }
11326
11327 static struct pci_error_handlers bnx2x_err_handler = {
11328         .error_detected = bnx2x_io_error_detected,
11329         .slot_reset     = bnx2x_io_slot_reset,
11330         .resume         = bnx2x_io_resume,
11331 };
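/* Recovery flow implemented by the three callbacks above: the PCI error
 * core first calls .error_detected (detach, soft unload, disable the
 * device, request a slot reset), then .slot_reset once the link has been
 * reset (re-enable, restore config space), and finally .resume, which
 * re-reads the shmem state via bnx2x_eeh_recover() and reloads the NIC
 * if it was running.
 */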
11332
11333 static struct pci_driver bnx2x_pci_driver = {
11334         .name        = DRV_MODULE_NAME,
11335         .id_table    = bnx2x_pci_tbl,
11336         .probe       = bnx2x_init_one,
11337         .remove      = __devexit_p(bnx2x_remove_one),
11338         .suspend     = bnx2x_suspend,
11339         .resume      = bnx2x_resume,
11340         .err_handler = &bnx2x_err_handler,
11341 };
11342
11343 static int __init bnx2x_init(void)
11344 {
11345         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11346         if (bnx2x_wq == NULL) {
11347                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11348                 return -ENOMEM;
11349         }
11350
11351         return pci_register_driver(&bnx2x_pci_driver);
11352 }
11353
11354 static void __exit bnx2x_cleanup(void)
11355 {
11356         pci_unregister_driver(&bnx2x_pci_driver);
11357
11358         destroy_workqueue(bnx2x_wq);
11359 }
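/* Ordering matters in bnx2x_cleanup(): the PCI driver is unregistered
 * first, so every bnx2x_remove_one() has completed and no new work can be
 * queued by the time destroy_workqueue() flushes and tears down bnx2x_wq.
 */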
11360
11361 module_init(bnx2x_init);
11362 module_exit(bnx2x_cleanup);
11363